/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

struct bnxt_qplib_sge {
	u64				addr;
	u32				lkey;
	u32				size;
};

#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE	sizeof(struct sq_send)

#define SQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
#define SQE_MAX_IDX_PER_PG	(SQE_CNT_PER_PG - 1)

static inline u32 get_sqe_pg(u32 val)
{
	return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
}

static inline u32 get_sqe_idx(u32 val)
{
	return (val & SQE_MAX_IDX_PER_PG);
}
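
/*
 * Illustrative example (added commentary, not from the original source):
 * the helpers above split a linear WQE index into a page number and an
 * offset within that page.  Assuming 4 KiB pages and, say, 32 SQEs per
 * page (so SQE_MAX_IDX_PER_PG == 31), an index of 70 gives
 * get_sqe_pg(70) == 2 and get_sqe_idx(70) == 6, i.e. the 7th entry of
 * the 3rd SQ page.  The PSNE/RQE/CQE/NQE helpers and macros below apply
 * the same pattern to their respective entry sizes.
 */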

#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE	sizeof(struct sq_psn_search)

#define PSNE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
#define PSNE_MAX_IDX_PER_PG	(PSNE_CNT_PER_PG - 1)

static inline u32 get_psne_pg(u32 val)
{
	return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
}

static inline u32 get_psne_idx(u32 val)
{
	return (val & PSNE_MAX_IDX_PER_PG);
}

#define BNXT_QPLIB_QP_MAX_SGL	6

struct bnxt_qplib_swq {
	u64				wr_id;
	u8				type;
	u8				flags;
	u32				start_psn;
	u32				next_psn;
	struct sq_psn_search		*psn_search;
};

struct bnxt_qplib_swqe {
	/* General */
#define	BNXT_QPLIB_FENCE_WRID	0x46454E43	/* "FENC" */
	u64				wr_id;
	u8				reqs_type;
	u8				type;
#define BNXT_QPLIB_SWQE_TYPE_SEND			0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM		1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV		2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE			4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM	5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ			6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP		8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD	11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV			12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR		13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR			13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW			14
#define BNXT_QPLIB_SWQE_TYPE_RECV			128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM		129
	u8				flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP		BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE		BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
	struct bnxt_qplib_sge		sg_list[BNXT_QPLIB_QP_MAX_SGL];
	int				num_sge;
	/* Max inline data is 96 bytes */
	u32				inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH		96
	u8		inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];

	union {
		/* Send, with imm, inval key */
		struct {
			union {
				__be32	imm_data;
				u32	inv_key;
			};
			u32		q_key;
			u32		dst_qp;
			u16		avid;
		} send;

		/* Send Raw Ethernet and QP1 */
		struct {
			u16		lflags;
			u16		cfa_action;
			u32		cfa_meta;
		} rawqp1;

		/* RDMA write, with imm, read */
		struct {
			union {
				__be32	imm_data;
				u32	inv_key;
			};
			u64		remote_va;
			u32		r_key;
		} rdma;

		/* Atomic cmp/swap, fetch/add */
		struct {
			u64		remote_va;
			u32		r_key;
			u64		swap_data;
			u64		cmp_data;
		} atomic;

		/* Local Invalidate */
		struct {
			u32		inv_l_key;
		} local_inv;

		/* FR-PMR */
		struct {
			u8		access_cntl;
			u8		pg_sz_log;
			bool		zero_based;
			u32		l_key;
			u32		length;
			u8		pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K			0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K			1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K			4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K			6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M			8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M			9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M			10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G			18
			u8		levels;
#define PAGE_SHIFT_4K	12
			__le64		*pbl_ptr;
			dma_addr_t	pbl_dma_ptr;
			u64		*page_list;
			u16		page_list_len;
			u64		va;
		} frmr;

		/* Bind */
		struct {
			u8		access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE		BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ		BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE	BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC	BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND		BIT(4)
			bool		zero_based;
			u8		mw_type;
			u32		parent_l_key;
			u32		r_key;
			u64		va;
			u32		length;
		} bind;
	};
};
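
/*
 * Usage sketch (added commentary, not from the original source; the
 * variable names my_cookie, dma_addr, lkey, len, qp and rc are
 * hypothetical).  A caller building a signalled SEND with one SGE
 * would roughly do:
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *
 *	wqe.wr_id = my_cookie;
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 *	wqe.sg_list[0].addr = dma_addr;		(DMA-mapped buffer assumed)
 *	wqe.sg_list[0].lkey = lkey;
 *	wqe.sg_list[0].size = len;
 *	wqe.num_sge = 1;
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 *
 * using the bnxt_qplib_post_send()/bnxt_qplib_post_send_db() entry
 * points declared at the end of this header.
 */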

#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE	sizeof(struct rq_wqe)

#define RQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
#define RQE_MAX_IDX_PER_PG	(RQE_CNT_PER_PG - 1)
#define RQE_PG(x)		(((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
#define RQE_IDX(x)		((x) & RQE_MAX_IDX_PER_PG)

struct bnxt_qplib_q {
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_swq		*swq;
	struct scatterlist		*sglist;
	u32				nmap;
	u32				max_wqe;
	u16				q_full_delta;
	u16				max_sge;
	u32				psn;
	bool				flush_in_progress;
	bool				condition;
	bool				single;
	bool				send_phantom;
	u32				phantom_wqe_cnt;
	u32				phantom_cqe_cnt;
	u32				next_cq_cons;
};

struct bnxt_qplib_qp {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;
	u64				qp_handle;
	u32				id;
	u8				type;
	u8				sig_type;
	u32				modify_flags;
	u8				state;
	u8				cur_qp_state;
	u32				max_inline_data;
	u32				mtu;
	u8				path_mtu;
	bool				en_sqd_async_notify;
	u16				pkey_index;
	u32				qkey;
	u32				dest_qp_id;
	u8				access;
	u8				timeout;
	u8				retry_cnt;
	u8				rnr_retry;
	u64				wqe_cnt;
	u32				min_rnr_timer;
	u32				max_rd_atomic;
	u32				max_dest_rd_atomic;
	u32				dest_qpn;
	u8				smac[6];
	u16				vlan_id;
	u8				nw_type;
	struct bnxt_qplib_ah		ah;

#define BTH_PSN_MASK			((1 << 24) - 1)
	/* SQ */
	struct bnxt_qplib_q		sq;
	/* RQ */
	struct bnxt_qplib_q		rq;
	/* SRQ */
	struct bnxt_qplib_srq		*srq;
	/* CQ */
	struct bnxt_qplib_cq		*scq;
	struct bnxt_qplib_cq		*rcq;
	/* IRRQ and ORRQ */
	struct bnxt_qplib_hwq		irrq;
	struct bnxt_qplib_hwq		orrq;
	/* Header buffer for QP1 */
	int				sq_hdr_buf_size;
	int				rq_hdr_buf_size;
	/*
	 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
	 * and ib_bth + ib_deth (20).
	 * Max required is 82 when RoCE V2 is enabled
	 */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2	86
	/* Ethernet header	=  14 */
	/* ib_grh		=  40 (provided by MAD) */
	/* ib_bth + ib_deth	=  20 */
	/* MAD			= 256 (provided by MAD) */
	/* iCRC			=   4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE	14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2	512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4	20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6	40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE	20
	void				*sq_hdr_buf;
	dma_addr_t			sq_hdr_buf_map;
	void				*rq_hdr_buf;
	dma_addr_t			rq_hdr_buf_map;
};

#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE	sizeof(struct cq_base)

#define CQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG	(CQE_CNT_PER_PG - 1)
#define CQE_PG(x)		(((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x)		((x) & CQE_MAX_IDX_PER_PG)

#define ROCE_CQE_CMP_V			0
#define CQE_CMP_VALID(hdr, raw_cons, cp_bit)			\
	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==		\
	   !((raw_cons) & (cp_bit)))

static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
{
	return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
		       &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
						 &qplib_q->hwq);
}
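
/*
 * Notes (added commentary, not from the original source):
 *
 * CQE_CMP_VALID() implements a phase/toggle validity check: the
 * hardware alternates the CQ_BASE_TOGGLE bit in the entries it writes
 * on each pass over the ring, and the macro compares that bit against
 * the phase derived from the driver's raw consumer index and cp_bit,
 * so stale entries left over from the previous pass are not mistaken
 * for new completions.  NQE_CMP_VALID() below uses the same scheme
 * with the NQ_BASE_V bit.
 *
 * bnxt_qplib_queue_full() reports the queue as full once the producer,
 * advanced by q_full_delta, would wrap around onto the consumer under
 * HWQ_CMP() index masking (HWQ_CMP() lives with the hwq helpers in
 * qplib_res.h).
 */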

struct bnxt_qplib_cqe {
	u8				status;
	u8				type;
	u8				opcode;
	u32				length;
	u64				wr_id;
	union {
		__be32			immdata;
		u32			invrkey;
	};
	u64				qp_handle;
	u64				mr_handle;
	u16				flags;
	u8				smac[6];
	u32				src_qp;
	u16				raweth_qp1_flags;
	u16				raweth_qp1_errors;
	u16				raweth_qp1_cfa_code;
	u32				raweth_qp1_flags2;
	u32				raweth_qp1_metadata;
	u8				raweth_qp1_payload_offset;
	u16				pkey_index;
};

#define BNXT_QPLIB_QUEUE_START_PERIOD		0x01
struct bnxt_qplib_cq {
	struct bnxt_qplib_dpi		*dpi;
	void __iomem			*dbr_base;
	u32				max_wqe;
	u32				id;
	u16				count;
	u16				period;
	struct bnxt_qplib_hwq		hwq;
	u32				cnq_hw_ring_id;
	bool				resize_in_progress;
	struct scatterlist		*sghead;
	u32				nmap;
	u64				cq_handle;

#define CQ_RESIZE_WAIT_TIME_MS		500
	unsigned long			flags;
#define CQ_FLAGS_RESIZE_IN_PROG		1
	wait_queue_head_t		waitq;
};

#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE	sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)	(2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)	(((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)	((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)	((s) - 1)

#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE	sizeof(struct nq_base)

#define NQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG	(NQE_CNT_PER_PG - 1)
#define NQE_PG(x)		(((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x)		((x) & NQE_MAX_IDX_PER_PG)

#define NQE_CMP_VALID(hdr, raw_cons, cp_bit)			\
	(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) ==	\
	   !((raw_cons) & (cp_bit)))

#define BNXT_QPLIB_NQE_MAX_CNT		(128 * 1024)

#define NQ_CONS_PCI_BAR_REGION		2
#define NQ_DB_KEY_CP			(0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID			CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS			CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM		(NQ_DB_KEY_CP |		\
					 NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS			(NQ_DB_KEY_CP    |	\
					 NQ_DB_IDX_VALID |	\
					 NQ_DB_IRQ_DIS)
#define NQ_DB_REARM(db, raw_cons, cp_bit)			\
	writel(NQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
#define NQ_DB(db, raw_cons, cp_bit)				\
	writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
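
/*
 * Added commentary (not from the original source): both NQ doorbell
 * macros write the doorbell key plus the masked consumer index,
 * (raw_cons) & ((cp_bit) - 1), to the NQ doorbell register.  The
 * difference is NQ_DB_IRQ_DIS (CMPL_DOORBELL_MASK): NQ_DB() includes
 * it and so leaves the NQ interrupt masked (typically while the worker
 * tasklet is still polling), while NQ_DB_REARM() omits it and thereby
 * re-arms the interrupt.  For example, assuming cp_bit is the ring
 * size, say 1024 entries, a raw consumer index of 1030 is written as
 * 1030 & 1023 == 6.
 */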

struct bnxt_qplib_nq {
	struct pci_dev			*pdev;

	int				vector;
	int				budget;
	bool				requested;
	struct tasklet_struct		worker;
	struct bnxt_qplib_hwq		hwq;

	u16				bar_reg;
	u16				bar_reg_off;
	u16				ring_id;
	void __iomem			*bar_reg_iomem;

	int				(*cqn_handler)
						(struct bnxt_qplib_nq *nq,
						 struct bnxt_qplib_cq *cq);
	int				(*srqn_handler)
						(struct bnxt_qplib_nq *nq,
						 void *srq,
						 u8 event);
};

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int msix_vector, int bar_reg_offset,
			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
					    struct bnxt_qplib_cq *cq),
			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
					     void *srq,
					     u8 event));
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
					    u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
#endif /* __BNXT_QPLIB_FP_H__ */