/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"

/* IPv4 'more fragments' flag bit */
#define IPV4_FLAG_MORE				0x20

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF		0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF		0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00

/* PCI BAR numbers */
#define PCI_CFG_REG_BAR_NUM			2
#define PCI_MBOX_BAR_NUM			4

#define NAME_SIZE				32

#ifdef CONFIG_DCB
/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX			8
#endif

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START			0x00
#define NIX_LF_CINT_VEC_START			0x40
#define NIX_LF_GINT_VEC				0x80
#define NIX_LF_ERR_VEC				0x81
#define NIX_LF_POISON_VEC			0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID	2000

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
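
/* Usage sketch (illustrative, not part of the driver): both macros expand
 * to an otx2_read64() call and therefore assume a variable named 'pfvf'
 * (a struct otx2_nic *) is in scope at the call site, e.g.:
 *
 *	u64 ucast = OTX2_GET_RX_STATS(RX_UCAST);
 *	u64 drops = OTX2_GET_TX_STATS(TX_DROP);
 */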

struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};

/* RSS configuration */
struct otx2_rss_ctx {
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
#define RSS_HASH_KEY_SIZE	44   /* 352 bit key */
	u8  key[RSS_HASH_KEY_SIZE];
	struct otx2_rss_ctx	*rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST = 0x0,
	TX_BCAST = 0x1,
	TX_MCAST = 0x2,
	TX_DROP = 0x3,
	TX_OCTS = 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS = 0x0,
	RX_UCAST = 0x1,
	RX_BCAST = 0x2,
	RX_MCAST = 0x3,
	RX_DROP = 0x4,
	RX_DROP_OCTS = 0x5,
	RX_FCS = 0x6,
	RX_ERR = 0x7,
	RX_DRP_BCAST = 0x8,
	RX_DRP_MCAST = 0x9,
	RX_DRP_L3BCAST = 0xa,
	RX_DRP_L3MCAST = 0xb,
	RX_STATS_ENUM_LAST,
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox	mbox;
	struct work_struct	mbox_wrk;
	struct otx2_mbox	mbox_up;
	struct work_struct	mbox_up_wrk;
	struct otx2_nic		*pfvf;
	void			*bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex		lock;	/* serialize mailbox access */
	int			num_msgs; /* mbox number of messages */
	int			up_num_msgs; /* mbox_up number of messages */
};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)
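
/* Illustrative sketch (not code from this header; the variable names are
 * invented for the example): since the field masks above are GENMASK_ULL
 * based, a NIX_TLX_PIR value can be composed with FIELD_PREP() from
 * <linux/bitfield.h>:
 *
 *	u64 pir = FIELD_PREP(TLX_RATE_MANTISSA, rate_mantissa) |
 *		  FIELD_PREP(TLX_RATE_EXPONENT, rate_exp) |
 *		  FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, rate_div_exp) |
 *		  FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
 *		  FIELD_PREP(TLX_BURST_EXPONENT, burst_exp);
 */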

struct otx2_hw {
	struct pci_dev		*pdev;
	struct otx2_rss_info	rss_info;
	u16			rx_queues;
	u16			tx_queues;
	u16			xdp_queues;
	u16			tc_tx_queues;
	u16			non_qos_queues; /* tx queues plus xdp queues */
	u16			max_queues;
	u16			pool_cnt;
	u16			rqpool_cnt;
	u16			sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN	2048
	u16			rbuf_len;
	u32			xqe_size;

	/* NPA */
	u32			stack_pg_ptrs;  /* No of ptrs per stack page */
	u32			stack_pg_bytes; /* Size of stack page */
	u16			sqb_size;

	/* NIX */
	u8			txschq_link_cfg_lvl;
	u16			txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16			matchall_ipolicer;
	u32			dwrr_mtu;

	/* HW settings, coalescing etc */
	u16			rx_chan_base;
	u16			tx_chan_base;
	u16			cq_qcount_wait;
	u16			cq_ecount_wait;
	u16			rq_skid;
	u8			cq_time_wait;

	/* Segmentation */
	u8			lso_tsov4_idx;
	u8			lso_tsov6_idx;
	u8			lso_udpv4_idx;
	u8			lso_udpv6_idx;

	/* RSS */
	u8			flowkey_alg_idx;

	/* MSI-X */
	u8			cint_cnt; /* CQ interrupt count */
	u16			npa_msixoff; /* Offset of NPA vectors */
	u16			nix_msixoff; /* Offset of NIX vectors */
	char			*irq_name;
	cpumask_var_t		*affinity_mask;

	/* Stats */
	struct otx2_dev_stats	dev_stats;
	struct otx2_drv_stats	drv_stats;
	u64			cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64			cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64			cgx_fec_corr_blks;
	u64			cgx_fec_uncorr_blks;
	u8			cgx_links;  /* No. of CGX links present in HW */
	u8			lbk_links;  /* No. of LBK links present in HW */
	u8			tx_link;    /* Transmit channel link number */
#define HW_TSO			0
#define CN10K_MBOX		1
#define CN10K_LMTST		2
#define CN10K_RPM		3
#define CN10K_PTP_ONESTEP	4
#define CN10K_HW_MACSEC		5
#define QOS_CIR_PIR_SUPPORT	6
	unsigned long		cap_flag;

#define LMT_LINE_SIZE		128
#define LMT_BURST_SIZE		32 /* 32 LMTST lines for burst SQE flush */
	u64			*lmt_base;
	struct otx2_lmt_info	__percpu *lmt_info;
};
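
/* Usage sketch (illustrative): cap_flag is a capability bitmap, so a
 * feature is queried with the bitop helpers, e.g.:
 *
 *	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
 *		... take the CN10K LMTST path ...
 */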

enum vfperm {
	OTX2_RESET_VF_PERM,
	OTX2_TRUSTED_VF,
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* interface is down, i.e. not configured */
	u8 mac[ETH_ALEN];
	u16 vlan;
	int tx_vtag_idx;
	bool trusted;
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
	__be16 seconds_msb; /* 16 bits + */
	__be32 seconds_lsb; /* 32 bits = 48 bits */
	__be32 nanoseconds;
} __packed;

struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
	struct delayed_work synctstamp_work;
	u64 tstamp;
	u32 base_ns;
};

#define OTX2_HW_TIMESTAMP_LEN	8

struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

struct otx2_flow_config {
	u16		*flow_ent;
	u16		*def_ent;
	u16		nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT		16
#define OTX2_MAX_UNICAST_FLOWS		8
#define OTX2_MAX_VLAN_FLOWS		1
#define OTX2_MAX_TC_FLOWS		OTX2_DEFAULT_FLOWCOUNT
#define OTX2_MCAM_COUNT			(OTX2_DEFAULT_FLOWCOUNT + \
					 OTX2_MAX_UNICAST_FLOWS + \
					 OTX2_MAX_VLAN_FLOWS)
	u16		unicast_offset;
	u16		rx_vlan_offset;
	u16		vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS	2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX	0
#define OTX2_VF_VLAN_TX_INDEX	1
	u32		*bmap_to_dmacindex;
	unsigned long	*dmacflt_bmap;
	struct list_head flow_list;
	u32		dmacflt_max_flows;
	u16		max_flows;
};

struct otx2_tc_info {
	/* hash table to store TC offloaded flows */
	struct rhashtable		flow_table;
	struct rhashtable_params	flow_ht_params;
	unsigned long			*tc_entries_bitmap;
};

struct dev_hw_ops {
	int	(*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
	void	(*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			     int size, int qidx);
	void	(*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void	(*aura_freeptr)(void *dev, int aura, u64 buf);
};
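
/* Design note with a usage sketch (an assumption, based on the ops table
 * above): dev_hw_ops abstracts the queue primitives that differ between
 * OcteonTx2 and CN10K silicon, so the datapath can stay generation-agnostic
 * by dispatching through the table, e.g.:
 *
 *	pfvf->hw_ops->sqe_flush(pfvf, sq, size, qidx);
 */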

#define CN10K_MCS_SA_PER_SC	4

/* Stats which need to be accumulated in software because
 * of shared counters in hardware.
 */
struct cn10k_txsc_stats {
	u64 InPktsUntagged;
	u64 InPktsNoTag;
	u64 InPktsBadTag;
	u64 InPktsUnknownSCI;
	u64 InPktsNoSCI;
	u64 InPktsOverrun;
};

struct cn10k_rxsc_stats {
	u64 InOctetsValidated;
	u64 InOctetsDecrypted;
	u64 InPktsUnchecked;
	u64 InPktsDelayed;
	u64 InPktsOK;
	u64 InPktsInvalid;
	u64 InPktsLate;
	u64 InPktsNotValid;
	u64 InPktsNotUsingSA;
	u64 InPktsUnusedSA;
};

struct cn10k_mcs_txsc {
	struct macsec_secy *sw_secy;
	struct cn10k_txsc_stats stats;
	struct list_head entry;
	enum macsec_validation_type last_validate_frames;
	bool last_replay_protect;
	u16 hw_secy_id_tx;
	u16 hw_secy_id_rx;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 encoding_sa;
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
	bool vlan_dev; /* MACsec running on a VLAN? */
};

struct cn10k_mcs_rxsc {
	struct macsec_secy *sw_secy;
	struct macsec_rx_sc *sw_rxsc;
	struct cn10k_rxsc_stats stats;
	struct list_head entry;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
};

struct cn10k_mcs_cfg {
	struct list_head txsc_list;
	struct list_head rxsc_list;
};

struct otx2_nic {
	void __iomem		*reg_base;
	struct net_device	*netdev;
	struct dev_hw_ops	*hw_ops;
	void			*iommu_domain;
	u16			tx_max_pktlen;
	u16			rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED		BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC		BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT		BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT		BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT		BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT		BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN			BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT		BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC		BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED	BIT_ULL(16)
	u64			flags;
	u64			*cq_op_addr;

	struct bpf_prog		*xdp_prog;
	struct otx2_qset	qset;
	struct otx2_hw		hw;
	struct pci_dev		*pdev;
	struct device		*dev;

	/* Mbox */
	struct mbox		mbox;
	struct mbox		*mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8			total_vfs;
	u16			pcifunc; /* RVU PF_FUNC */
	u16			bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config	*vf_configs;
	struct cgx_link_user_info linfo;

	/* NPC MCAM */
	struct otx2_flow_config	*flow_cfg;
	struct otx2_mac_table	*mac_table;
	struct otx2_tc_info	tc_info;

	u64			reset_count;
	struct work_struct	reset_task;
	struct workqueue_struct	*flr_wq;
	struct flr_work		*flr_wrk;
	struct refill_work	*refill_wrk;
	struct workqueue_struct	*otx2_wq;
	struct work_struct	rx_mode_work;

	/* Ethtool stuff */
	u32			msg_enable;

	/* Block address of NIX: either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int			nix_blkaddr;
	/* LMTST Lines info */
	struct qmem		*dync_lmt;
	u16			tot_lmt_lines;
	u16			npa_lmt_lines;
	u32			nix_lmt_size;

	struct otx2_ptp		*ptp;
	struct hwtstamp_config	tstamp;

	unsigned long		rq_bmap;

	/* Devlink */
	struct otx2_devlink	*dl;
#ifdef CONFIG_DCB
	/* PFC */
	u8			pfc_en;
	u8			*queue_to_pfc_map;
	u16			pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	bool			pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
	/* qos */
	struct otx2_qos		qos;

	/* napi event count. It is needed for adaptive irq coalescing. */
	u32			napi_events;

#if IS_ENABLED(CONFIG_MACSEC)
	struct cn10k_mcs_cfg	*macsec_cfg;
#endif
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX		0x00
#define PCI_REVISION_ID_95XX		0x10
#define PCI_REVISION_ID_95XXN		0x20
#define PCI_REVISION_ID_98XX		0x30
#define PCI_REVISION_ID_95XXMM		0x40
#define PCI_REVISION_ID_95XXO		0xE0

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
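
/* Worked example (illustrative): pdev->revision = 0x31 decodes per the
 * REVID comment above as midr 0x30 (98XX), major pass 0 (bits 3..2) and
 * minor pass 1 (bits 1..0), so is_dev_otx2() returns true for it.
 */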

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, previous silicons required a minimum
		 * of 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
	}

	if (is_dev_cn10kb(pfvf->pdev))
		__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in the bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* Since arm64 lacks an API for 128-bit I/O memory access, implement the
 * required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr)	writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr)	({ *ptr += incr; })
#endif

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if (!count_eot)
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}
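
/* Worked example (illustrative): freeing 14 buffer pointers means
 * num_ptrs = 15 (the header word in ptrs[0] plus 14 pointers). 15 u64s
 * span eight 128-bit words with the upper half of the last word unused,
 * so count_eot = 0, size becomes (8 * 15) / 16 + 1 = 8, and tar_addr[6:4]
 * is set to size - 1 = 7.
 */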

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	get_cpu();
	/* Free only one buffer at a time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
	put_cpu();
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));	\
	return req;							\
}

MBOX_MESSAGES
#undef M
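
/* Usage sketch (illustrative; nix_lf_free is assumed here to be one of the
 * MBOX_MESSAGES entries): each generated allocator is typically paired with
 * otx2_sync_mbox_msg() under the mbox lock:
 *
 *	struct nix_lf_free_req *req;
 *	int err;
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_nix_lf_free(&pfvf->mbox);
 *	if (req)
 *		err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 */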

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				struct _req_type *req,			\
				struct _rsp_type *rsp);			\

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M

/* Time to wait before watchdog kicks in */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define	RVU_PFVF_PF_SHIFT	10
#define	RVU_PFVF_PF_MASK	0x3F
#define	RVU_PFVF_FUNC_SHIFT	0
#define	RVU_PFVF_FUNC_MASK	0x3FF

static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
	u16 smq;
#ifdef CONFIG_DCB
	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
	/* check if qidx falls under QOS queues */
	if (qidx >= pfvf->hw.non_qos_queues)
		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
	else
		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];

	return smq;
}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}

static inline u64 otx2_convert_rate(u64 rate)
{
	u64 converted_rate;

	/* Convert bytes per second to Mbps */
	converted_rate = rate * 8;
	converted_rate = max_t(u64, converted_rate / 1000000, 1);

	return converted_rate;
}
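
/* Worked example (illustrative): rate = 1,250,000 bytes/sec gives
 * 10,000,000 bits/sec, so otx2_convert_rate() returns 10 (Mbps); anything
 * below 125,000 bytes/sec is clamped up to 1 Mbps.
 */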

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
/* PFC support */
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

/* qos support */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
	struct otx2_hw *hw = &pfvf->hw;

	hw->tc_tx_queues = qos_txqs;
	INIT_LIST_HEAD(&pfvf->qos.qos_tree);
	mutex_init(&pfvf->qos.qos_lock);
}

static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
	mutex_destroy(&pfvf->qos.qos_lock);
}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */