/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef RVU_H
#define RVU_H

#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"

/* PCI device IDs */
#define	PCI_DEVID_OCTEONTX2_RVU_AF	0xA065

/* PCI BAR nos */
#define	PCI_AF_REG_BAR_NUM	0
#define	PCI_PF_REG_BAR_NUM	2
#define	PCI_MBOX_BAR_NUM	4

#define NAME_SIZE	32

/* PF_FUNC: 16bit RVU function identifier.
 * Per the shift/mask values below, bits [15:10] hold the PF number and
 * bits [9:0] hold the function (VF) number.
 */
#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF

/* Work item wrapper carrying a back-pointer to the owning RVU device,
 * so workqueue callbacks can recover it (see mbox_wrk/mbox_wrk_up and
 * cgx_evh_work in struct rvu below).
 */
struct rvu_work {
	struct work_struct work;
	struct rvu *rvu;
};

/* Simple bitmap-based allocator for a pool of HW resource ids */
struct rsrc_bmap {
	unsigned long *bmap;	/* Pointer to resource bitmap */
	u16  max;		/* Max resource id or count */
};

/* Per-HW-block state: LF allocator, admin queue and the CSR offsets
 * used to count, configure and reset this block's LFs.
 */
struct rvu_block {
	struct rsrc_bmap lf;	/* LF id allocator */
	struct admin_queue *aq;	/* NIX/NPA AQ */
	u16  *fn_map;		/* LF to pcifunc mapping */
	bool multislot;		/* NOTE(review): presumably set for blocks where
				 * one function can attach multiple LF slots —
				 * confirm against block init code.
				 */
	bool implemented;	/* Block present on this silicon */
	u8   addr;		/* RVU_BLOCK_ADDR_E */
	u8   type;		/* RVU_BLOCK_TYPE_E */
	u8   lfshift;
	u64  lookup_reg;	/* CSR offsets below index AF registers for this
				 * block; exact semantics per the HW manual.
				 */
	u64  pf_lfcnt_reg;
	u64  vf_lfcnt_reg;
	u64  lfcfg_reg;
	u64  msixcfg_reg;
	u64  lfreset_reg;
	unsigned char name[NAME_SIZE];
};

/* NIX multicast/mirror replication state */
struct nix_mcast {
	struct qmem	*mce_ctx;	/* MCE (replication list entry) contexts */
	struct qmem	*mcast_buf;	/* Replication buffers */
	int		replay_pkind;
	int		next_free_mce;	/* Bump allocator cursor into mce_ctx */
	spinlock_t	mce_lock;	/* Serialize MCE updates */
};

/* A single replication list: hlist of MCEs plus occupancy bookkeeping */
struct nix_mce_list {
	struct hlist_head	head;
	int			count;	/* Entries currently on the list */
	int			max;	/* Capacity reserved for this list */
};

/* NPC MCAM (match CAM) geometry and reserved-entry layout */
struct npc_mcam {
	spinlock_t	lock;	/* MCAM entries and counters update lock */
	u8	keysize;	/* MCAM keysize 112/224/448 bits */
	u8	banks;		/* Number of MCAM banks */
	u8	banks_per_entry;/* Number of keywords in key */
	u16	banksize;	/* Number of MCAM entries in each bank */
	u16	total_entries;	/* Total number of MCAM entries */
	u16	entries;	/* Total minus reserved for NIX LFs */
	u16	nixlf_offset;	/* Offset of nixlf rsvd uncast entries */
	u16	pf_offset;	/* Offset of PF's rsvd bcast, promisc entries */
};

/* Structure for per RVU func info ie PF/VF */
struct rvu_pfvf {
	bool		npalf; /* Only one NPALF per RVU_FUNC */
	bool		nixlf; /* Only one NIXLF per RVU_FUNC */
	u16		sso;	/* Counts of LFs attached per block type */
	u16		ssow;
	u16		cptlfs;
	u16		timlfs;
	u8		cgx_lmac;

	/* Block LF's MSIX vector info */
	struct rsrc_bmap msix;      /* Bitmap for MSIX vector alloc */
/* Encode (block address, LF) into one u16 for msix_lfmap below */
#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
	u16		 *msix_lfmap; /* Vector to block LF mapping */

	/* NPA contexts */
	struct qmem	*aura_ctx;
	struct qmem	*pool_ctx;
	struct qmem	*npa_qints_ctx;
	unsigned long	*aura_bmap;	/* Track enabled auras/pools so they can
					 * be disabled at teardown.
					 */
	unsigned long	*pool_bmap;

	/* NIX contexts */
	struct qmem	*rq_ctx;
	struct qmem	*sq_ctx;
	struct qmem	*cq_ctx;
	struct qmem	*rss_ctx;
	struct qmem	*cq_ints_ctx;
	struct qmem	*nix_qints_ctx;
	unsigned long	*sq_bmap;	/* Track enabled SQs/RQs/CQs */
	unsigned long	*rq_bmap;
	unsigned long	*cq_bmap;

	u16		rx_chan_base;
	u16		tx_chan_base;
	u8		rx_chan_cnt; /* total number of RX channels */
	u8		tx_chan_cnt; /* total number of TX channels */

	u8		mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */

	/* Broadcast pkt replication info */
	u16			bcast_mce_idx;
	struct nix_mce_list	bcast_mce_list;
};

/* One NIX transmit scheduler level: queue allocator and owner map */
struct nix_txsch {
	struct rsrc_bmap schq;
	u8   lvl;		/* Scheduler level this instance represents */
	u16  *pfvf_map;		/* schq to owning pcifunc — TODO confirm */
};

/* NPC port-kind allocator and pkind/channel ownership map */
struct npc_pkind {
	struct rsrc_bmap rsrc;
	u32	*pfchan_map;
};

/* Per-NIX-block HW state */
struct nix_hw {
	struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
	struct nix_mcast mcast;
};

/* Capabilities and per-block state discovered from the hardware */
struct rvu_hwinfo {
	u8	total_pfs;   /* MAX RVU PFs HW supports */
	u16	total_vfs;   /* Max RVU VFs HW supports */
	u16	max_vfs_per_pf; /* Max VFs that can be attached to a PF */
	u8	cgx;
	u8	lmac_per_cgx;
	u8	cgx_links;
	u8	lbk_links;
	u8	sdp_links;
	u8	npc_kpus;          /* No of parser units */

	struct rvu_block block[BLK_COUNT]; /* Block info */
	struct nix_hw    *nix0;
	struct npc_pkind pkind;
	struct npc_mcam  mcam;
};

/* Top-level driver state for the RVU Admin Function device */
struct rvu {
	void __iomem		*afreg_base;	/* AF register space (BAR0) */
	void __iomem		*pfreg_base;	/* AF's own PF register space */
	struct pci_dev		*pdev;
	struct device		*dev;
	struct rvu_hwinfo       *hw;
	struct rvu_pfvf		*pf;	/* Per-PF state, indexed by PF id */
	struct rvu_pfvf		*hwvf;	/* Per-VF state, indexed by HW VF id */
	spinlock_t		rsrc_lock; /* Serialize resource alloc/free */

	/* Mbox */
	struct otx2_mbox	mbox;		/* PF -> AF requests */
	struct rvu_work		*mbox_wrk;
	struct otx2_mbox        mbox_up;	/* AF -> PF notifications */
	struct rvu_work		*mbox_wrk_up;
	struct workqueue_struct *mbox_wq;

	/* MSI-X */
	u16			num_vec;
	char			*irq_name;
	bool			*irq_allocated;
	dma_addr_t		msix_base_iova;

	/* CGX */
#define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
	u8			cgx_mapped_pfs;
	u8			cgx_cnt; /* available cgx ports */
	u8			*pf2cgxlmac_map; /* pf to cgx_lmac map */
	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
						  * every cgx lmac port
						  */
	unsigned long		pf_notify_bmap; /* Flags for PF notification */
	void			**cgx_idmap; /* cgx id to cgx data map table */
	struct			work_struct cgx_evh_work;
	struct			workqueue_struct *cgx_evh_wq;
	spinlock_t		cgx_evq_lock; /* cgx event queue lock */
	struct list_head	cgx_evq_head; /* cgx event queue head */
};

/* AF register write: 'block' is the RVU block address, which selects a
 * 256MB (1 << 28) window within the AF BAR; 'offset' is the CSR offset
 * inside that window.
 */
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
	writeq(val, rvu->afreg_base + ((block << 28) | offset));
}

/* AF register read; addressing as in rvu_write64() */
static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
{
	return readq(rvu->afreg_base + ((block << 28) | offset));
}

/* Write a register in the AF's own PF register space */
static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
{
	writeq(val, rvu->pfreg_base + offset);
}

/* Read a register from the AF's own PF register space */
static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
{
	return readq(rvu->pfreg_base + offset);
}

/* Function Prototypes
 * RVU
 */
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);

/* RVU HW reg validation */
enum regmap_block {
	TXSCHQ_HWREGMAP = 0,
	MAX_HWREGMAP,
};

bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);

/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);

/* CGX APIs */

/* True if this PF is backed by a CGX LMAC port (PF0 is the RVU AF itself;
 * CGX-mapped PFs start at PF_CGXMAP_BASE).
 */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
	return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
}

/* Unpack a pf2cgxlmac_map byte: CGX id in the high nibble, LMAC id in
 * the low nibble.
 */
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
{
	*cgx_id = (map >> 4) & 0xF;
	*lmac_id = (map & 0xF);
}

int rvu_cgx_probe(struct rvu *rvu);
void rvu_cgx_wq_destroy(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp);
/* Remaining CGX mbox handlers (dispatched by the AF mbox infrastructure) */
int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp);
int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp);

/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp);
int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp);
int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp);
int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp);

/* NIX APIs */
int rvu_nix_init(struct rvu *rvu);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp);
int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp);
int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp);
int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
				      struct nix_set_mac_addr *req,
				      struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
				     struct msg_rsp *rsp);

/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
				 int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
				   int nixlf, u64 chan, bool allmulti);
void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
				       int nixlf, u64 chan);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
				    int group, int alg_idx, int mcam_index);
#endif /* RVU_H */