/openbmc/linux/drivers/net/ethernet/marvell/octeontx2/af/ — search hits for "rvu"
rvu.h
  58:  struct rvu *rvu;  [member]
  87:  struct rvu *rvu;  [member]
  113: struct rvu *rvu;  [member]
  338: struct rvu *rvu;  [member]
  399: struct rvu *rvu;  [member]
  475: struct rvu {  [struct]
  557: static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)  [in rvu_write64(), argument]
  559: writeq(val, rvu->afreg_base + ((block << 28) | offset));  [in rvu_write64()]
  562: static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)  [in rvu_read64(), argument]
  564: return readq(rvu->afreg_base + ((block << 28) | offset));  [in rvu_read64()]
  [all …]
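The rvu.h hits show the two register accessors the rest of this directory builds on: rvu_write64() and rvu_read64() add ((block << 28) | offset) to the AF BAR base, so the hardware block index sits above bit 28 and the register offset in the low 28 bits. Below is a minimal user-space sketch of that address arithmetic only; the base address, block number, and the af_reg_addr() helper are invented for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as rvu_write64()/rvu_read64() in rvu.h: block index
 * above bit 28, register offset in the low 28 bits. */
static uint64_t af_reg_addr(uint64_t afreg_base, uint64_t block, uint64_t offset)
{
    return afreg_base + ((block << 28) | offset);
}

int main(void)
{
    uint64_t base = 0x840000000000ULL;  /* invented BAR base, illustration only */

    /* block 0x6, register offset 0x10 -> base + 0x60000010 */
    printf("0x%llx\n", (unsigned long long)af_reg_addr(base, 0x6, 0x10));
    return 0;
}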
rvu_cgx.c
  26: *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
  31: &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
  37: trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
  44: bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)  [in is_mac_feature_supported(), argument]
  49: if (!is_pf_cgxmapped(rvu, pf))  [in is_mac_feature_supported()]
  52: rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);  [in is_mac_feature_supported()]
  53: cgxd = rvu_cgx_pdata(cgx_id, rvu);  [in is_mac_feature_supported()]
  58: #define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
  60: static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)  [in cgxlmac_to_pfmap(), argument]
  62: return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];  [in cgxlmac_to_pfmap()]
  [all …]
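cgxlmac_to_pfmap() resolves a (cgx_id, lmac_id) pair through a flat lookup table indexed as CGX_OFFSET(cgx_id) + lmac_id, where CGX_OFFSET(x) is x * lmac_per_cgx. The sketch below reproduces only that index calculation; the lmac_per_cgx value and the cgxlmac_index() helper are assumptions made for the example, not driver code.

#include <stdio.h>

/* Flat (cgx, lmac) -> slot index, mirroring CGX_OFFSET(cgx_id) + lmac_id:
 * each CGX owns lmac_per_cgx consecutive entries in the map. */
static unsigned int cgxlmac_index(unsigned int cgx_id, unsigned int lmac_id,
                                  unsigned int lmac_per_cgx)
{
    return cgx_id * lmac_per_cgx + lmac_id;
}

int main(void)
{
    unsigned int lmac_per_cgx = 4;  /* assumed per-CGX LMAC count */

    printf("cgx 1, lmac 2 -> slot %u\n", cgxlmac_index(1, 2, lmac_per_cgx));
    return 0;
}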
rvu_cpt.c
  29: reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
  43: struct rvu *rvu = block->rvu;  [in cpt_af_flt_intr_handler(), local]
  49: reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));  [in cpt_af_flt_intr_handler()]
  50: dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);  [in cpt_af_flt_intr_handler()]
  65: grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;  [in cpt_af_flt_intr_handler()]
  67: rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);  [in cpt_af_flt_intr_handler()]
  68: val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));  [in cpt_af_flt_intr_handler()]
  69: rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);  [in cpt_af_flt_intr_handler()]
  71: rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);  [in cpt_af_flt_intr_handler()]
  72: rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);  [in cpt_af_flt_intr_handler()]
  [all …]
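The cpt_af_flt_intr_handler() hits show how a faulting CPT engine is recovered: its group assignment is read from CPT_AF_EXEX_CTL2 and cleared, the enable bit (bit 0 of CPT_AF_EXEX_CTL) is dropped, and then the group and the enable bit are restored. The user-space model below replays that register sequence against two plain variables standing in for the real rvu_read64()/rvu_write64() accesses; it illustrates the ordering only and is not driver code.

#include <stdint.h>
#include <stdio.h>

static uint64_t exe_ctl  = 0x3;   /* stand-in for CPT_AF_EXEX_CTL(eng)  */
static uint64_t exe_ctl2 = 0x42;  /* stand-in for CPT_AF_EXEX_CTL2(eng) */

static void cpt_engine_recover(void)
{
    uint64_t grp, val;

    grp = exe_ctl2 & 0xFF;    /* remember the engine group bits   */
    exe_ctl2 = 0x0;           /* detach the engine from its group */
    val = exe_ctl;
    exe_ctl = val & ~1ULL;    /* clear the enable bit             */
    exe_ctl2 = grp;           /* reattach the group ...           */
    exe_ctl = val | 1ULL;     /* ... and enable the engine again  */
}

int main(void)
{
    cpt_engine_recover();
    printf("CTL=0x%llx CTL2=0x%llx\n",
           (unsigned long long)exe_ctl, (unsigned long long)exe_ctl2);
    return 0;
}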
rvu.c
  27: static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
  29: static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
  31: static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
  33: static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
  61: static void rvu_setup_hw_capabilities(struct rvu *rvu)  [in rvu_setup_hw_capabilities(), argument]
  63: struct rvu_hwinfo *hw = rvu->hw;  [in rvu_setup_hw_capabilities()]
  73: hw->rvu = rvu;  [in rvu_setup_hw_capabilities()]
  75: if (is_rvu_pre_96xx_C0(rvu)) {  [in rvu_setup_hw_capabilities()]
  82: if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))  [in rvu_setup_hw_capabilities()]
  85: if (!is_rvu_pre_96xx_C0(rvu))  [in rvu_setup_hw_capabilities()]
  [all …]
rvu_devlink.c
  39: static bool rvu_common_request_irq(struct rvu *rvu, int offset,  [in rvu_common_request_irq(), argument]
  42: struct rvu_devlink *rvu_dl = rvu->rvu_dl;  [in rvu_common_request_irq()]
  45: sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);  [in rvu_common_request_irq()]
  46: rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,  [in rvu_common_request_irq()]
  47: &rvu->irq_name[offset * NAME_SIZE], rvu_dl);  [in rvu_common_request_irq()]
  49: dev_warn(rvu->dev, "Failed to register %s irq\n", name);  [in rvu_common_request_irq()]
  51: rvu->irq_allocated[offset] = true;  [in rvu_common_request_irq()]
  53: return rvu->irq_allocated[offset];  [in rvu_common_request_irq()]
  70: struct rvu *rvu;  [in rvu_nix_af_rvu_intr_handler(), local]
  74: rvu = rvu_dl->rvu;  [in rvu_nix_af_rvu_intr_handler()]
  [all …]
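rvu_common_request_irq() keeps all interrupt names in one flat char array, giving each vector a NAME_SIZE-byte slot addressed as &irq_name[offset * NAME_SIZE], and passes that slot to request_irq(). The sketch below shows only the slot arithmetic in user space; NAME_SIZE and the vector count are assumed values, and snprintf() replaces the driver's sprintf().

#include <stdio.h>

#define NAME_SIZE 32   /* assumed slot size; the driver defines its own */
#define NVEC       4   /* assumed number of vectors, for the example    */

static char irq_name[NVEC * NAME_SIZE];

int main(void)
{
    int offset;

    /* Fill one fixed-size slot per vector, as the driver does before
     * handing &irq_name[offset * NAME_SIZE] to request_irq(). */
    for (offset = 0; offset < NVEC; offset++)
        snprintf(&irq_name[offset * NAME_SIZE], NAME_SIZE,
                 "example-af-vec%d", offset);

    for (offset = 0; offset < NVEC; offset++)
        printf("vec %d -> \"%s\"\n", offset, &irq_name[offset * NAME_SIZE]);
    return 0;
}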
rvu_npc_hash.c
  103: static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,  [in npc_update_use_hash(), argument]
  109: cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));  [in npc_update_use_hash()]
  122: static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,  [in npc_program_mkex_hash_rx(), argument]
  125: struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;  [in npc_program_mkex_hash_rx()]
  141: cfg = npc_update_use_hash(rvu, blkaddr,  [in npc_program_mkex_hash_rx()]
  162: static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,  [in npc_program_mkex_hash_tx(), argument]
  165: struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;  [in npc_program_mkex_hash_tx()]
  181: cfg = npc_update_use_hash(rvu, blkaddr,  [in npc_program_mkex_hash_tx()]
  200: void npc_config_secret_key(struct rvu *rvu, int blkaddr)  [in npc_config_secret_key(), argument]
  202: struct hw_cap *hwcap = &rvu->hw->cap;  [in npc_config_secret_key()]
  [all …]
rvu_nix.c
  20: static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
  21: static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
  23: static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
  25: static int nix_setup_ipolicers(struct rvu *rvu,
  27: static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
  30: static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
  31: static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
  83: int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)  [in rvu_get_next_nix_blkaddr(), argument]
  89: return rvu->nix_blkaddr[blkaddr];  [in rvu_get_next_nix_blkaddr()]
  92: if (rvu->nix_blkaddr[i] == blkaddr)  [in rvu_get_next_nix_blkaddr()]
  [all …]
rvu_cn10k.c
  20: static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,  [in lmtst_map_table_ops(), argument]
  26: tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);  [in lmtst_map_table_ops()]
  30: dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");  [in lmtst_map_table_ops()]
  43: rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));  [in lmtst_map_table_ops()]
  44: rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);  [in lmtst_map_table_ops()]
  45: rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);  [in lmtst_map_table_ops()]
  53: static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)  [in rvu_get_lmtst_tbl_index(), argument]
  55: return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +  [in rvu_get_lmtst_tbl_index()]
  59: static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,  [in rvu_get_lmtaddr(), argument]
  66: dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);  [in rvu_get_lmtaddr()]
  [all …]
mcs_rvu_if.c
  19: *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
  24: &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
  36: void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)  [in rvu_mcs_ptp_cfg(), argument]
  42: if (!rvu->mcs_blk_cnt)  [in rvu_mcs_ptp_cfg()]
  51: if (rvu->mcs_blk_cnt > 1) {  [in rvu_mcs_ptp_cfg()]
  63: port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;  [in rvu_mcs_ptp_cfg()]
  72: int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,  [in rvu_mbox_handler_mcs_set_lmac_mode(), argument]
  78: if (req->mcs_id >= rvu->mcs_blk_cnt)  [in rvu_mbox_handler_mcs_set_lmac_mode()]
  93: struct rvu *rvu = mcs->rvu;  [in mcs_add_intr_wq_entry(), local]
  98: pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];  [in mcs_add_intr_wq_entry()]
  [all …]
rvu_npc.c
  32: static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
  34: static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
  47: bool is_npc_interface_valid(struct rvu *rvu, u8 intf)  [in is_npc_interface_valid(), argument]
  49: struct rvu_hwinfo *hw = rvu->hw;  [in is_npc_interface_valid()]
  54: int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)  [in rvu_npc_get_tx_nibble_cfg(), argument]
  59: if (is_rvu_96xx_B0(rvu))  [in rvu_npc_get_tx_nibble_cfg()]
  64: static int npc_mcam_verify_pf_func(struct rvu *rvu,  [in npc_mcam_verify_pf_func(), argument]
  86: void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)  [in rvu_npc_set_pkind(), argument]
  91: blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);  [in rvu_npc_set_pkind()]
  97: rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);  [in rvu_npc_set_pkind()]
  [all …]
rvu_npa.c
  15: static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,  [in npa_aq_enqueue_wait(), argument]
  26: reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);  [in npa_aq_enqueue_wait()]
  36: rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);  [in npa_aq_enqueue_wait()]
  50: if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))  [in npa_aq_enqueue_wait()]
  51: dev_err(rvu->dev,  [in npa_aq_enqueue_wait()]
  61: int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,  [in rvu_npa_aq_enq_inst(), argument]
  64: struct rvu_hwinfo *hw = rvu->hw;  [in rvu_npa_aq_enq_inst()]
  74: pfvf = rvu_get_pfvf(rvu, pcifunc);  [in rvu_npa_aq_enq_inst()]
  78: blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);  [in rvu_npa_aq_enq_inst()]
  85: dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);  [in rvu_npa_aq_enq_inst()]
  [all …]
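npa_aq_enqueue_wait() submits an admin-queue instruction by writing 1 to NPA_AF_AQ_DOOR and then consulting NPA_AF_AQ_STATUS. Only the doorbell write and the status read are visible in the excerpt, so the bounded polling loop in the sketch below is an assumption about the surrounding code, the completion bit is assumed to be bit 0, and all register accesses are replaced by plain variables.

#include <stdint.h>
#include <stdio.h>

#define POLL_LIMIT 1000          /* assumed iteration budget */

static uint64_t aq_status;       /* stands in for NPA_AF_AQ_STATUS */

static void ring_doorbell(void)
{
    /* NPA_AF_AQ_DOOR <- 1 in the driver; the fake hardware modelled
     * here completes the instruction immediately. */
    aq_status |= 1;
}

static int aq_enqueue_wait(void)
{
    int timeout = POLL_LIMIT;

    ring_doorbell();
    while (timeout--) {
        if (aq_status & 1)       /* completion bit, assumed for the sketch */
            return 0;
    }
    return -1;                   /* the real driver returns an error code */
}

int main(void)
{
    printf("enqueue: %s\n", aq_enqueue_wait() ? "timed out" : "done");
    return 0;
}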
rvu_switch.c
  11: static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)  [in rvu_switch_enable_lbk_link(), argument]
  13: struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);  [in rvu_switch_enable_lbk_link()]
  16: nix_hw = get_nix_hw(rvu->hw, pfvf->nix_blkaddr);  [in rvu_switch_enable_lbk_link()]
  18: rvu_nix_tx_tl2_cfg(rvu, pfvf->nix_blkaddr, pcifunc,  [in rvu_switch_enable_lbk_link()]
  22: static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,  [in rvu_switch_install_rx_rule(), argument]
  29: pfvf = rvu_get_pfvf(rvu, pcifunc);  [in rvu_switch_install_rx_rule()]
  48: return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);  [in rvu_switch_install_rx_rule()]
  51: static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)  [in rvu_switch_install_tx_rule(), argument]
  58: pfvf = rvu_get_pfvf(rvu, pcifunc);  [in rvu_switch_install_tx_rule()]
  66: rvu_switch_enable_lbk_link(rvu, pcifunc, true);  [in rvu_switch_install_tx_rule()]
  [all …]
rvu_npc_hash.h
  27: rvu_write64(rvu, blkaddr, \
  31: rvu_write64(rvu, blkaddr, \
  35: rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld))
  38: rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx))
  41: rvu_write64(rvu, blkaddr, \
  55: void npc_update_field_hash(struct rvu *rvu, u8 intf,
  63: void npc_config_secret_key(struct rvu *rvu, int blkaddr);
  64: void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
  210: bool rvu_npc_exact_has_match_table(struct rvu *rvu);
  211: u32 rvu_npc_exact_get_max_entries(struct rvu *rvu);
  [all …]
rvu_debugfs.c
  519: static void rvu_dbg_mcs_init(struct rvu *rvu)  [in rvu_dbg_mcs_init(), argument]
  525: if (!rvu->mcs_blk_cnt)  [in rvu_dbg_mcs_init()]
  528: rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);  [in rvu_dbg_mcs_init()]
  530: for (i = 0; i < rvu->mcs_blk_cnt; i++) {  [in rvu_dbg_mcs_init()]
  534: rvu->rvu_dbg.mcs = debugfs_create_dir(dname,  [in rvu_dbg_mcs_init()]
  535: rvu->rvu_dbg.mcs_root);  [in rvu_dbg_mcs_init()]
  537: rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);  [in rvu_dbg_mcs_init()]
  539: debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,  [in rvu_dbg_mcs_init()]
  542: debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,  [in rvu_dbg_mcs_init()]
  545: debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,  [in rvu_dbg_mcs_init()]
  [all …]
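rvu_dbg_mcs_init() builds its debugfs tree with nothing but debugfs_create_dir() and debugfs_create_file(): an "mcs" directory under the driver root, one directory per MCS block, an "rx_stats" subdirectory, and per-object files such as "flowid" and "secy". The kernel-module sketch below reproduces that shape with placeholder names and a trivial seq_file show routine; only the debugfs and seq_file calls are the real kernel API, everything else is invented for the example and is not the driver's code.

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *example_root;

/* Trivial contents for the example files; the driver installs its own
 * MCS statistics fops here instead. */
static int example_show(struct seq_file *s, void *unused)
{
    seq_puts(s, "example\n");
    return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);

static int __init example_debugfs_init(void)
{
    struct dentry *blk, *rx;

    example_root = debugfs_create_dir("mcs_example", NULL);
    blk = debugfs_create_dir("mcs0", example_root);   /* one per MCS block */
    rx  = debugfs_create_dir("rx_stats", blk);
    debugfs_create_file("flowid", 0600, rx, NULL, &example_fops);
    debugfs_create_file("secy", 0600, rx, NULL, &example_fops);
    return 0;
}

static void __exit example_debugfs_exit(void)
{
    debugfs_remove_recursive(example_root);
}

module_init(example_debugfs_init);
module_exit(example_debugfs_exit);
MODULE_LICENSE("GPL");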
rvu_npc_fs.c
  49: bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)  [in npc_is_feature_supported(), argument]
  51: struct npc_mcam *mcam = &rvu->hw->mcam;  [in npc_is_feature_supported()]
  128: static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)  [in npc_is_field_present(), argument]
  130: struct npc_mcam *mcam = &rvu->hw->mcam;  [in npc_is_field_present()]
  187: static bool npc_check_overlap(struct rvu *rvu, int blkaddr,  [in npc_check_overlap(), argument]
  190: struct npc_mcam *mcam = &rvu->hw->mcam;  [in npc_check_overlap()]
  207: cfg = rvu_read64(rvu, blkaddr,  [in npc_check_overlap()]
  236: static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,  [in npc_check_field(), argument]
  239: if (!npc_is_field_present(rvu, type, intf) ||  [in npc_check_field()]
  240: npc_check_overlap(rvu, blkaddr, type, 0, intf))  [in npc_check_field()]
  [all …]
rvu_sdp.c
  49: int rvu_sdp_init(struct rvu *rvu)  [in rvu_sdp_init(), argument]
  60: pfvf = &rvu->pf[sdp_pf_num[i]];  [in rvu_sdp_init()]
  62: pfvf->sdp_info = devm_kzalloc(rvu->dev,  [in rvu_sdp_init()]
  70: dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);  [in rvu_sdp_init()]
  81: rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu,  [in rvu_mbox_handler_set_sdp_chan_info(), argument]
  85: struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);  [in rvu_mbox_handler_set_sdp_chan_info()]
  88: dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n",  [in rvu_mbox_handler_set_sdp_chan_info()]
  95: rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req,  [in rvu_mbox_handler_get_sdp_chan_info(), argument]
  98: struct rvu_hwinfo *hw = rvu->hw;  [in rvu_mbox_handler_get_sdp_chan_info()]
  105: blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);  [in rvu_mbox_handler_get_sdp_chan_info()]
  [all …]
ptp.c
  98: static bool is_tstmp_atomic_update_supported(struct rvu *rvu)  [in is_tstmp_atomic_update_supported(), argument]
  100: struct ptp *ptp = rvu->ptp;  [in is_tstmp_atomic_update_supported()]
  102: if (is_rvu_otx2(rvu))  [in is_tstmp_atomic_update_supported()]
  366: void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)  [in ptp_start(), argument]
  368: struct ptp *ptp = rvu->ptp;  [in ptp_start()]
  387: if (is_tstmp_atomic_update_supported(rvu)) {  [in ptp_start()]
  588: int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,  [in rvu_mbox_handler_ptp_op(), argument]
  600: if (!rvu->ptp)  [in rvu_mbox_handler_ptp_op()]
  605: err = ptp_adjfine(rvu->ptp, req->scaled_ppm);  [in rvu_mbox_handler_ptp_op()]
  608: err = ptp_get_clock(rvu->ptp, &rsp->clk);  [in rvu_mbox_handler_ptp_op()]
  [all …]
rvu_devlink.h
  72: struct rvu *rvu;  [member]
  79: int rvu_register_dl(struct rvu *rvu);
  80: void rvu_unregister_dl(struct rvu *rvu);
ptp.h
  26: struct rvu;
  29: void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts);
rvu_npc_fs.h
  17: void npc_update_entry(struct rvu *rvu, enum key_fields type,
rvu_reg.h
  628: if (rvu->hw->npc_ext_set) \
  636: if (rvu->hw->npc_ext_set) \
  644: if (rvu->hw->npc_ext_set) \
  652: if (rvu->hw->npc_ext_set) \
  660: if (rvu->hw->npc_ext_set) \
  668: if (rvu->hw->npc_ext_set) \
  676: if (rvu->hw->npc_ext_set) \
  684: if (rvu->hw->npc_ext_set) \
Makefile
  11: rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_trace.h
  9: #define TRACE_SYSTEM rvu
npc.h
  14: rvu_write64(rvu, blkaddr, \
  18: rvu_write64(rvu, blkaddr, \
mcs.h
  150: void *rvu;  [member]