1 /* 2 * Broadcom NetXtreme-E RoCE driver. 3 * 4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term 5 * Broadcom refers to Broadcom Limited and/or its subsidiaries. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in 21 * the documentation and/or other materials provided with the 22 * distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * Description: QPLib resource manager (header)
 */

#ifndef __BNXT_QPLIB_RES_H__
#define __BNXT_QPLIB_RES_H__

/* All-zero GID, used to recognize/clear unused SGID table entries. */
extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;

/* PCI device IDs of the Gen-P5 chips; see bnxt_qplib_is_chip_gen_p5(). */
#define CHIP_NUM_57508		0x1750
#define CHIP_NUM_57504		0x1751
#define CHIP_NUM_57502		0x1752

struct bnxt_qplib_drv_modes {
	u8	wqe_mode;
	/* Other modes to follow here */
};

/* Chip identification and per-chip parameters shared across the driver. */
struct bnxt_qplib_chip_ctx {
	u16	chip_num;
	u8	chip_rev;
	u8	chip_metal;
	u16	hw_stats_size;
	struct bnxt_qplib_drv_modes modes;
};

/* Locate an entry inside a page-sized array of pointers:
 * PTR_PG() gives the page index, PTR_IDX() the slot within that page.
 */
#define PTR_CNT_PER_PG		(PAGE_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG	(PTR_CNT_PER_PG - 1)
#define PTR_PG(x)		(((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
#define PTR_IDX(x)		((x) & PTR_MAX_IDX_PER_PG)

/* Wrap a raw producer/consumer index into the queue.
 * Relies on max_elements being a power of two.
 */
#define HWQ_CMP(idx, hwq)	((idx) & ((hwq)->max_elements - 1))

/* Free slots between producer and consumer, accounting for wraparound. */
#define HWQ_FREE_SLOTS(hwq)	(hwq->max_elements - \
				((HWQ_CMP(hwq->prod, hwq)\
				- HWQ_CMP(hwq->cons, hwq))\
				& (hwq->max_elements - 1)))
enum bnxt_qplib_hwq_type {
	HWQ_TYPE_CTX,
	HWQ_TYPE_QUEUE,
	HWQ_TYPE_L2_CMPL,
	HWQ_TYPE_MR
};

/* Limits for each level of the page-buffer-list hierarchy. */
#define MAX_PBL_LVL_0_PGS		1
#define MAX_PBL_LVL_1_PGS		512
#define MAX_PBL_LVL_1_PGS_SHIFT		9
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2	256
#define MAX_PBL_LVL_2_PGS		(256 * 512)
#define MAX_PDL_LVL_SHIFT		9

enum bnxt_qplib_pbl_lvl {
	PBL_LVL_0,
	PBL_LVL_1,
	PBL_LVL_2,
	PBL_LVL_MAX
};

#define ROCE_PG_SIZE_4K		(4 * 1024)
#define ROCE_PG_SIZE_8K		(8 * 1024)
#define ROCE_PG_SIZE_64K	(64 * 1024)
#define ROCE_PG_SIZE_2M		(2 * 1024 * 1024)
#define ROCE_PG_SIZE_8M		(8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G		(1024 * 1024 * 1024)

/* Page-size encodings used in HWRM commands; produced by
 * bnxt_qplib_base_pg_size() from the byte sizes above.
 */
enum bnxt_qplib_hwrm_pg_size {
	BNXT_QPLIB_HWRM_PG_SIZE_4K	= 0,
	BNXT_QPLIB_HWRM_PG_SIZE_8K	= 1,
	BNXT_QPLIB_HWRM_PG_SIZE_64K	= 2,
	BNXT_QPLIB_HWRM_PG_SIZE_2M	= 3,
	BNXT_QPLIB_HWRM_PG_SIZE_8M	= 4,
	BNXT_QPLIB_HWRM_PG_SIZE_1G	= 5,
};

/* A mapped PCI BAR region. */
struct bnxt_qplib_reg_desc {
	u8		bar_id;
	resource_size_t	bar_base;
	void __iomem	*bar_reg;
	size_t		len;
};

/* One level of a page-buffer list: parallel arrays of the pages'
 * kernel virtual addresses and DMA addresses.
 */
struct bnxt_qplib_pbl {
	u32				pg_count;
	u32				pg_size;
	void				**pg_arr;
	dma_addr_t			*pg_map_arr;
};

struct bnxt_qplib_sg_info {
	struct ib_umem			*umem;
	u32				npages;
	u32				pgshft;
	u32				pgsize;
	bool				nopte;
};

/* Parameters describing a hardware queue to bnxt_qplib_alloc_init_hwq(). */
struct bnxt_qplib_hwq_attr {
	struct bnxt_qplib_res		*res;
	struct bnxt_qplib_sg_info	*sginfo;
	enum bnxt_qplib_hwq_type	type;
	u32				depth;
	u32				stride;
	u32				aux_stride;
	u32				aux_depth;
};

/* A DMA-backed hardware queue with producer/consumer indices and its
 * page-buffer-list hierarchy.
 */
struct bnxt_qplib_hwq {
	struct pci_dev		*pdev;
	/* lock to protect qplib_hwq */
	spinlock_t		lock;
	struct bnxt_qplib_pbl	pbl[PBL_LVL_MAX + 1];
	enum bnxt_qplib_pbl_lvl	level;		/* 0, 1, or 2 */
	/* ptr for easy access to the PBL entries */
	void			**pbl_ptr;
	/* ptr for easy access to the dma_addr */
	dma_addr_t		*pbl_dma_ptr;
	u32			max_elements;
	u32			depth;
	u16			element_size;	/* Size of each entry */
	u16			qe_ppg;		/* queue entry per page */

	u32			prod;		/* raw */
	u32			cons;		/* raw */
	u8			cp_bit;
	u8			is_user;
	u64			*pad_pg;
	u32			pad_stride;
	u32			pad_pgofft;
};

/* Doorbell addresses and identity (xid) for one queue; consumed by the
 * bnxt_qplib_ring_*_db() helpers below.
 */
struct bnxt_qplib_db_info {
	void __iomem		*db;
	void __iomem		*priv_db;
	struct bnxt_qplib_hwq	*hwq;
	u32			xid;
	u32			max_slot;
};

/* Tables */
struct bnxt_qplib_pd_tbl {
	unsigned long		*tbl;	/* PD id allocation bitmap */
	u32			max;
};

struct bnxt_qplib_sgid_tbl {
	struct bnxt_qplib_gid_info	*tbl;
	u16				*hw_id;
	u16				max;
	u16				active;
	void				*ctx;
	u8				*vlan;
};

struct bnxt_qplib_dpi {
	u32			dpi;
	void __iomem		*dbr;
	u64			umdbr;
};

struct bnxt_qplib_dpi_tbl {
	void			**app_tbl;
	unsigned long		*tbl;	/* DPI allocation bitmap */
	u16			max;
	void __iomem		*dbr_bar_reg_iomem;
	u64			unmapped_dbr;
};

/* A statistics context DMA buffer and its firmware-assigned id. */
struct bnxt_qplib_stats {
	dma_addr_t		dma_map;
	void			*dma;
	u32			size;
	u32			fw_id;
};

/* Per-VF resource limits carved out of the PF's totals. */
struct bnxt_qplib_vf_res {
	u32 max_qp_per_vf;
	u32 max_mrw_per_vf;
	u32 max_srq_per_vf;
	u32 max_cq_per_vf;
	u32 max_gid_per_vf;
};

#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE	448
#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE	64
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE	64
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE	128

#define MAX_TQM_ALLOC_REQ		48
#define MAX_TQM_ALLOC_BLK_SIZE		8
struct bnxt_qplib_tqm_ctx {
	struct bnxt_qplib_hwq		pde;
	u8				pde_level; /* Original level */
	struct bnxt_qplib_hwq		qtbl[MAX_TQM_ALLOC_REQ];
	u8				qcount[MAX_TQM_ALLOC_REQ];
};

/* Firmware context memory (QP/MRW/SRQ/CQ/TIM/TQM tables) backed by the host. */
struct bnxt_qplib_ctx {
	u32			qpc_count;
	struct bnxt_qplib_hwq	qpc_tbl;
	u32			mrw_count;
	struct bnxt_qplib_hwq	mrw_tbl;
	u32			srqc_count;
	struct bnxt_qplib_hwq	srqc_tbl;
	u32			cq_count;
	struct bnxt_qplib_hwq	cq_tbl;
	struct bnxt_qplib_hwq	tim_tbl;
	struct bnxt_qplib_tqm_ctx tqm_ctx;
	struct bnxt_qplib_stats	stats;
	struct bnxt_qplib_vf_res vf_res;
	u64			hwrm_intf_ver;
};

/* Top-level resource container for one RoCE device instance. */
struct bnxt_qplib_res {
	struct pci_dev			*pdev;
	struct bnxt_qplib_chip_ctx	*cctx;
	struct bnxt_qplib_dev_attr	*dattr;
	struct net_device		*netdev;
	struct bnxt_qplib_rcfw		*rcfw;
	struct bnxt_qplib_pd_tbl	pd_tbl;
	struct bnxt_qplib_sgid_tbl	sgid_tbl;
	struct bnxt_qplib_dpi_tbl	dpi_tbl;
	bool				prio;
	bool				is_vf;
};

/* True for the Gen-P5 chips (57508/57504/57502) defined above. */
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
	return (cctx->chip_num == CHIP_NUM_57508 ||
		cctx->chip_num == CHIP_NUM_57504 ||
		cctx->chip_num == CHIP_NUM_57502);
}

/* Completion-queue HWQ type differs between Gen-P5 and older chips. */
static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
{
	return bnxt_qplib_is_chip_gen_p5(res->cctx) ?
	       HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
}

/* HWRM ring type to request for the notification/completion ring. */
static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
{
	return bnxt_qplib_is_chip_gen_p5(cctx) ?
	       RING_ALLOC_REQ_RING_TYPE_NQ :
	       RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}

/* Translate the level-0 PBL page size (bytes) into its HWRM encoding.
 * Unrecognized sizes fall back to the 4K encoding.
 */
static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)
{
	u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
	struct bnxt_qplib_pbl *pbl;

	pbl = &hwq->pbl[PBL_LVL_0];
	switch (pbl->pg_size) {
	case ROCE_PG_SIZE_4K:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
		break;
	case ROCE_PG_SIZE_8K:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
		break;
	case ROCE_PG_SIZE_64K:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
		break;
	case ROCE_PG_SIZE_2M:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
		break;
	case ROCE_PG_SIZE_8M:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
		break;
	case ROCE_PG_SIZE_1G:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
		break;
	default:
		break;
	}

	return pg_size;
}

/* Return a pointer to queue element @indx; if @pg is non-NULL, also store
 * the address of the pbl_ptr slot holding that element's page.
 */
static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
				      u32 indx, u64 *pg)
{
	u32 pg_num, pg_idx;

	pg_num = (indx / hwq->qe_ppg);
	pg_idx = (indx % hwq->qe_ppg);
	if (pg)
		*pg = (u64)&hwq->pbl_ptr[pg_num];
	return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
}

/* Element @idx slots ahead of the producer, wrapping at queue depth. */
static inline void *bnxt_qplib_get_prod_qe(struct bnxt_qplib_hwq *hwq, u32 idx)
{
	idx += hwq->prod;
	if (idx >= hwq->depth)
		idx -= hwq->depth;
	return bnxt_qplib_get_qe(hwq, idx, NULL);
}

#define to_bnxt_qplib(ptr, type, member)	\
	container_of(ptr, type, member)

struct bnxt_qplib_pd;
struct bnxt_qplib_dev_attr;

void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
			struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pd_tbl,
			  struct bnxt_qplib_pd *pd);
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app);
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpi_tbl,
			   struct bnxt_qplib_dpi *dpi);
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr);
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5);
int bnxt_qplib_determine_atomics(struct pci_dev *dev);

/* Advance the producer index by @cnt, wrapping at queue depth. */
static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_hwq *hwq, u32 cnt)
{
	hwq->prod = (hwq->prod + cnt) % hwq->depth;
}

/* Advance the consumer index by @cnt, wrapping at queue depth. */
static inline void bnxt_qplib_hwq_incr_cons(struct bnxt_qplib_hwq *hwq,
					    u32 cnt)
{
	hwq->cons = (hwq->cons + cnt) % hwq->depth;
}

/* Ring the legacy 32-bit completion doorbell with the current consumer
 * index; when not arming, set the mask bit to suppress notifications.
 */
static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
					bool arm)
{
	u32 key;

	key = info->hwq->cons & (info->hwq->max_elements - 1);
	key |= (CMPL_DOORBELL_IDX_VALID |
		(CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
	if (!arm)
		key |= CMPL_DOORBELL_MASK;
	writel(key, info->db);
}

/* Ring a 64-bit doorbell: xid/path/type in the upper 32 bits, the current
 * consumer index in the lower 32 bits.
 */
static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
				      u32 type)
{
	u64 key = 0;

	key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
	key <<= 32;
	key |= (info->hwq->cons & (info->hwq->max_elements - 1)) &
		DBC_DBC_INDEX_MASK;
	writeq(key, info->db);
}

/* As bnxt_qplib_ring_db(), but with the producer index (in units of
 * max_slot slots) in the lower 32 bits.
 */
static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
					   u32 type)
{
	u64 key = 0;

	key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
	key <<= 32;
	key |= ((info->hwq->prod / info->max_slot)) & DBC_DBC_INDEX_MASK;
	writeq(key, info->db);
}

/* Arm-enable doorbell, written to the privileged doorbell address. */
static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
				       u32 type)
{
	u64 key = 0;

	key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
	key <<= 32;
	writeq(key, info->priv_db);
}

/* Arm the SRQ doorbell with threshold @th.
 * NOTE(review): 'th' is OR-ed into the type field of the upper word as
 * well as used as the index in the lower word — confirm this is intended
 * rather than a DBC_DBC_TYPE_SRQ_ARM constant.
 */
static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info,
					 u32 th)
{
	u64 key = 0;

	key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | th;
	key <<= 32;
	key |= th & DBC_DBC_INDEX_MASK;
	writeq(key, info->priv_db);
}

/* Notification-queue doorbell: Gen-P5 chips use the 64-bit DBC format,
 * older chips the 32-bit completion doorbell.
 */
static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
					 struct bnxt_qplib_chip_ctx *cctx,
					 bool arm)
{
	u32 type;

	type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
	if (bnxt_qplib_is_chip_gen_p5(cctx))
		bnxt_qplib_ring_db(info, type);
	else
		bnxt_qplib_ring_db32(info, arm);
}

/* Extended-statistics capability bit from the query_func response flags. */
static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
{
	return dev_cap_flags &
		CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
#endif /* __BNXT_QPLIB_RES_H__ */