/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"

/* IPv4 More Fragments (MF) flag bit */
#define IPV4_FLAG_MORE 0x20

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
#define PCI_MBOX_BAR_NUM 4

#define NAME_SIZE 32

#ifdef CONFIG_DCB
/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX 8
#endif

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupt vector ranges */
#define NIX_LF_QINT_VEC_START 0x00
#define NIX_LF_CINT_VEC_START 0x40
#define NIX_LF_GINT_VEC 0x80
#define NIX_LF_ERR_VEC 0x81
#define NIX_LF_POISON_VEC 0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))

struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};
/* RSS configuration */
struct otx2_rss_ctx {
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
	u8 key[RSS_HASH_KEY_SIZE];
	struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST = 0x0,
	TX_BCAST = 0x1,
	TX_MCAST = 0x2,
	TX_DROP = 0x3,
	TX_OCTS = 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS = 0x0,
	RX_UCAST = 0x1,
	RX_BCAST = 0x2,
	RX_MCAST = 0x3,
	RX_DROP = 0x4,
	RX_DROP_OCTS = 0x5,
	RX_FCS = 0x6,
	RX_ERR = 0x7,
	RX_DRP_BCAST = 0x8,
	RX_DRP_MCAST = 0x9,
	RX_DRP_L3BCAST = 0xa,
	RX_DRP_L3MCAST = 0xb,
	RX_STATS_ENUM_LAST,
};
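/* Editorial usage sketch (not part of the driver): OTX2_GET_RX_STATS() and
 * OTX2_GET_TX_STATS() above expand to otx2_read64(pfvf, ...), so they assume
 * a 'struct otx2_nic *pfvf' is in scope at the call site, e.g.:
 *
 *	u64 rx_drops = OTX2_GET_RX_STATS(RX_DROP);
 *	u64 tx_frames = OTX2_GET_TX_STATS(TX_UCAST) +
 *			OTX2_GET_TX_STATS(TX_BCAST) +
 *			OTX2_GET_TX_STATS(TX_MCAST);
 */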
struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox mbox;
	struct work_struct mbox_wrk;
	struct otx2_mbox mbox_up;
	struct work_struct mbox_up_wrk;
	struct otx2_nic *pfvf;
	void *bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex lock; /* serialize mailbox access */
	int num_msgs; /* mbox number of messages */
	int up_num_msgs; /* mbox_up number of messages */
};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT 0x0FULL
#define MAX_BURST_MANTISSA 0xFFULL
#define MAX_BURST_SIZE 130816ULL
#define MAX_RATE_DIVIDER_EXPONENT 12ULL
#define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
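/* Editorial note (hedged): judging by the limits above, hardware appears to
 * compute burst as ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256,
 * since ((256 + 0xFF) << (1 + 0xF)) / 256 = 130816 = MAX_BURST_SIZE.
 * A NIX_TLX_PIR value would then be composed with FIELD_PREP() from
 * <linux/bitfield.h>; sketch below, where bit 0 is assumed to be the
 * enable bit:
 *
 *	regval = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
 *		 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
 *		 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
 *		 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
 *		 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
 */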
struct otx2_hw {
	struct pci_dev *pdev;
	struct otx2_rss_info rss_info;
	u16 rx_queues;
	u16 tx_queues;
	u16 xdp_queues;
	u16 tc_tx_queues;
	u16 non_qos_queues; /* tx queues plus xdp queues */
	u16 max_queues;
	u16 pool_cnt;
	u16 rqpool_cnt;
	u16 sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN 2048
	u16 rbuf_len;
	u32 xqe_size;

	/* NPA */
	u32 stack_pg_ptrs;  /* No of ptrs per stack page */
	u32 stack_pg_bytes; /* Size of stack page */
	u16 sqb_size;

	/* NIX */
	u8 txschq_link_cfg_lvl;
	u8 txschq_aggr_lvl_rr_prio;
	u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16 matchall_ipolicer;
	u32 dwrr_mtu;
	u8 smq_link_type;

	/* HW settings, coalescing etc */
	u16 rx_chan_base;
	u16 tx_chan_base;
	u16 cq_qcount_wait;
	u16 cq_ecount_wait;
	u16 rq_skid;
	u8 cq_time_wait;

	/* Segmentation */
	u8 lso_tsov4_idx;
	u8 lso_tsov6_idx;
	u8 lso_udpv4_idx;
	u8 lso_udpv6_idx;

	/* RSS */
	u8 flowkey_alg_idx;

	/* MSI-X */
	u8 cint_cnt;     /* CQ interrupt count */
	u16 npa_msixoff; /* Offset of NPA vectors */
	u16 nix_msixoff; /* Offset of NIX vectors */
	char *irq_name;
	cpumask_var_t *affinity_mask;

	/* Stats */
	struct otx2_dev_stats dev_stats;
	struct otx2_drv_stats drv_stats;
	u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64 cgx_fec_corr_blks;
	u64 cgx_fec_uncorr_blks;
	u8 cgx_links; /* No. of CGX links present in HW */
	u8 lbk_links; /* No. of LBK links present in HW */
	u8 tx_link;   /* Transmit channel link number */
#define HW_TSO 0
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
#define CN10K_PTP_ONESTEP 4
#define CN10K_HW_MACSEC 5
#define QOS_CIR_PIR_SUPPORT 6
	unsigned long cap_flag;

#define LMT_LINE_SIZE 128
#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
	u64 *lmt_base;
	struct otx2_lmt_info __percpu *lmt_info;
};
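/* Usage sketch (illustrative): cap_flag is an unsigned long bitmap keyed by
 * the HW_TSO/CN10K_*/QOS_* bit numbers above and is queried with the
 * standard bitops; see otx2_setup_dev_hw_settings() below for where the
 * bits are set:
 *
 *	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
 *		... take the LMTST-based path ...
 */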
enum vfperm {
	OTX2_RESET_VF_PERM,
	OTX2_TRUSTED_VF,
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* true when the VF interface is not configured (down) */
	u8 mac[ETH_ALEN];
	u16 vlan;
	int tx_vtag_idx;
	bool trusted;
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
	struct napi_struct *napi;
};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
	__be16 seconds_msb; /* 16 bits + */
	__be32 seconds_lsb; /* 32 bits = 48 bits */
	__be32 nanoseconds;
} __packed;
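/* Illustrative decode (editorial): the PTPv2 origin timestamp carries a
 * 48-bit seconds field split across the two big-endian members above.
 * Given 'ts', a pointer to a received struct ptpv2_tstamp:
 *
 *	u64 sec = ((u64)be16_to_cpu(ts->seconds_msb) << 32) |
 *		  be32_to_cpu(ts->seconds_lsb);
 *	u32 nsec = be32_to_cpu(ts->nanoseconds);
 */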
struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
	u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
	struct delayed_work synctstamp_work;
	u64 tstamp;
	u32 base_ns;
};

#define OTX2_HW_TIMESTAMP_LEN 8

struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

struct otx2_flow_config {
	u16 *flow_ent;
	u16 *def_ent;
	u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT 16
#define OTX2_MAX_UNICAST_FLOWS 8
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
			 OTX2_MAX_UNICAST_FLOWS + \
			 OTX2_MAX_VLAN_FLOWS)
	u16 unicast_offset;
	u16 rx_vlan_offset;
	u16 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
	u32 *bmap_to_dmacindex;
	unsigned long *dmacflt_bmap;
	struct list_head flow_list;
	u32 dmacflt_max_flows;
	u16 max_flows;
	struct list_head flow_list_tc;
	bool ntuple;
};

struct dev_hw_ops {
	int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			  int size, int qidx);
	int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void (*aura_freeptr)(void *dev, int aura, u64 buf);
};

#define CN10K_MCS_SA_PER_SC 4

/* Stats which need to be accumulated in software because
 * of shared counters in hardware.
 */
struct cn10k_txsc_stats {
	u64 InPktsUntagged;
	u64 InPktsNoTag;
	u64 InPktsBadTag;
	u64 InPktsUnknownSCI;
	u64 InPktsNoSCI;
	u64 InPktsOverrun;
};

struct cn10k_rxsc_stats {
	u64 InOctetsValidated;
	u64 InOctetsDecrypted;
	u64 InPktsUnchecked;
	u64 InPktsDelayed;
	u64 InPktsOK;
	u64 InPktsInvalid;
	u64 InPktsLate;
	u64 InPktsNotValid;
	u64 InPktsNotUsingSA;
	u64 InPktsUnusedSA;
};

struct cn10k_mcs_txsc {
	struct macsec_secy *sw_secy;
	struct cn10k_txsc_stats stats;
	struct list_head entry;
	enum macsec_validation_type last_validate_frames;
	bool last_replay_protect;
	u16 hw_secy_id_tx;
	u16 hw_secy_id_rx;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 encoding_sa;
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
	bool vlan_dev; /* macsec running on VLAN ? */
};

struct cn10k_mcs_rxsc {
	struct macsec_secy *sw_secy;
	struct macsec_rx_sc *sw_rxsc;
	struct cn10k_rxsc_stats stats;
	struct list_head entry;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
};

struct cn10k_mcs_cfg {
	struct list_head txsc_list;
	struct list_head rxsc_list;
};

struct otx2_nic {
	void __iomem *reg_base;
	struct net_device *netdev;
	struct dev_hw_ops *hw_ops;
	void *iommu_domain;
	u16 tx_max_pktlen;
	u16 rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
	u64 flags;
	u64 *cq_op_addr;

	struct bpf_prog *xdp_prog;
	struct otx2_qset qset;
	struct otx2_hw hw;
	struct pci_dev *pdev;
	struct device *dev;

	/* Mbox */
	struct mbox mbox;
	struct mbox *mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8 total_vfs;
	u16 pcifunc; /* RVU PF_FUNC */
	u16 bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config *vf_configs;
	struct cgx_link_user_info linfo;

	/* NPC MCAM */
	struct otx2_flow_config *flow_cfg;
	struct otx2_mac_table *mac_table;

	u64 reset_count;
	struct work_struct reset_task;
	struct workqueue_struct *flr_wq;
	struct flr_work *flr_wrk;
	struct refill_work *refill_wrk;
	struct workqueue_struct *otx2_wq;
	struct work_struct rx_mode_work;

	/* Ethtool stuff */
	u32 msg_enable;

	/* Block address of NIX: either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int nix_blkaddr;
	/* LMTST Lines info */
	struct qmem *dync_lmt;
	u16 tot_lmt_lines;
	u16 npa_lmt_lines;
	u32 nix_lmt_size;

	struct otx2_ptp *ptp;
	struct hwtstamp_config tstamp;

	unsigned long rq_bmap;

	/* Devlink */
	struct otx2_devlink *dl;
#ifdef CONFIG_DCB
	/* PFC */
	u8 pfc_en;
	u8 *queue_to_pfc_map;
	u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
	/* qos */
	struct otx2_qos qos;

	/* napi event count. It is needed for adaptive irq coalescing. */
	u32 napi_events;

#if IS_ENABLED(CONFIG_MACSEC)
	struct cn10k_mcs_cfg *macsec_cfg;
#endif
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

/* REVID for PCIe devices.
 * Bits 1..0: minor pass, bits 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX 0x00
#define PCI_REVISION_ID_95XX 0x10
#define PCI_REVISION_ID_95XXN 0x20
#define PCI_REVISION_ID_98XX 0x30
#define PCI_REVISION_ID_95XXMM 0x40
#define PCI_REVISION_ID_95XXO 0xE0

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
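/* Worked example (editorial): pdev->revision = 0x31 decodes per the comment
 * above as midr 0x30 (PCI_REVISION_ID_98XX), major pass 0 (bits 3..2) and
 * minor pass 1 (bits 1..0), so is_dev_otx2() returns true for such a device.
 */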
static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, this older silicon requires a minimum
		 * of 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
	}

	if (is_dev_cn10kb(pfvf->pdev))
		__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}
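/* Illustration (editorial, assuming the usual otx2_reg.h encoding): a NIX LF
 * register offset such as NIX_LF_RX_STATX(RX_OCTS) carries BLKTYPE_NIX in
 * its block-address field, which otx2_get_regaddr() replaces with the
 * device's actual block address (BLKADDR_NIX0 or BLKADDR_NIX1 from
 * nic->nix_blkaddr) before adding reg_base:
 *
 *	void __iomem *addr = otx2_get_regaddr(nic, NIX_LF_RX_STATX(RX_OCTS));
 *	u64 octets = readq(addr);
 */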
static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* In the absence of a 128-bit IO memory access API for arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
#endif

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if (!count_eot)
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	get_cpu();
	/* Free only one buffer at a time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
	put_cpu();
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}
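/* Pool layout sketch (editorial): RQ pools occupy indices [0, rqpool_cnt)
 * and SQ pools follow immediately after, so with hw.rqpool_cnt == 8:
 *
 *	otx2_get_pool_idx(pfvf, AURA_NIX_RQ, 0);	returns 0
 *	otx2_get_pool_idx(pfvf, AURA_NIX_SQ, 0);	returns 8
 */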
/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));	\
	return req;							\
}

MBOX_MESSAGES
#undef M
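/* Typical request flow built on the helpers generated above (editorial
 * sketch; the NIX_LF_START_RX message is only an example, and mbox->lock
 * must be held across allocation and send):
 *
 *	struct msg_req *req;
 *	int err;
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
 *	if (!req) {
 *		mutex_unlock(&pfvf->mbox.lock);
 *		return -ENOMEM;
 *	}
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 */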
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);		\

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M

/* Time to wait before the watchdog kicks in */
#define OTX2_TX_TIMEOUT (100 * HZ)

#define RVU_PFVF_PF_SHIFT 10
#define RVU_PFVF_PF_MASK 0x3F
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF

static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
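/* Worked example (editorial): pcifunc = 0x0801 splits into PF = 0x0801 >> 10
 * = 2 and FUNC = 0x0801 & 0x3FF = 1; a nonzero FUNC marks a VF (FUNC is
 * assumed to be the VF index plus one, per the usual RVU convention), so
 * is_otx2_vf() returns true and rvu_get_pf() returns 2.
 */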
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
	u16 smq;
#ifdef CONFIG_DCB
	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
	/* check if qidx falls under QOS queues */
	if (qidx >= pfvf->hw.non_qos_queues)
		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
	else
		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];

	return smq;
}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}

static inline u64 otx2_convert_rate(u64 rate)
{
	u64 converted_rate;

	/* Convert bytes per second to Mbps */
	converted_rate = rate * 8;
	converted_rate = max_t(u64, converted_rate / 1000000, 1);

	return converted_rate;
}
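/* Worked example (editorial): otx2_convert_rate(125000000) converts
 * 125000000 bytes/sec to 1000000000 bits/sec and returns 1000 Mbps;
 * anything below 125000 bytes/sec clamps to the 1 Mbps floor.
 */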
static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{
	/* return here if MCAM entries are not allocated */
	if (!pfvf->flow_cfg)
		return 0;

	return pfvf->flow_cfg->nr_flows;
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
int otx2_reset_mac_stats(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
/* PFC support */
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

/* qos support */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
	struct otx2_hw *hw = &pfvf->hw;

	hw->tc_tx_queues = qos_txqs;
	INIT_LIST_HEAD(&pfvf->qos.qos_tree);
	mutex_init(&pfvf->qos.qos_lock);
}

static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
	mutex_destroy(&pfvf->qos.qos_lock);
}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */