/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include <be_roce.h>
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC	"Emulex OneConnect RoCE HCA"

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

struct ocrdma_dev_attr {
	u8 fw_ver[32];
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;
	u16 max_cq;
	u16 max_cqe;
	u16 max_qp;
	u16 max_wqe;
	u16 max_rqe;
	u16 max_srq;
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_srq_sge;
	int max_rdma_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;
	u16 max_ird_per_qp;

	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;

	u32 wqe_size;
	u32 rqe_size;
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
};

struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};
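
/*
 * Each ocrdma_pbl entry pairs the kernel virtual address of one
 * physical-buffer-list page with its DMA/bus address. A minimal sketch of
 * how such a page could be obtained, assuming a "pdev" PCI device pointer
 * (illustrative only, not necessarily the allocation path this driver uses):
 *
 *	struct ocrdma_pbl pbl;
 *
 *	pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &pbl.pa,
 *				    GFP_KERNEL);
 *	if (!pbl.va)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, pbl.va, pbl.pa);
 */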

struct ocrdma_queue_info {
	void *va;
	dma_addr_t dma;
	u32 size;
	u16 len;
	u16 entry_size;		/* size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell. */
	u16 head, tail;
	bool created;
};

struct ocrdma_eq {
	struct ocrdma_queue_info q;
	u32 vector;
	int cq_cnt;
	struct ocrdma_dev *dev;
	char irq_name[32];
};

struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	bool rearm_cq;
};

struct mqe_ctx {
	struct mutex lock; /* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;
	u32 tag;
	u16 cqe_status;
	u16 ext_status;
	bool cmd_done;
};

struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock; /* provides synchronized access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;
	struct ocrdma_qp **qp_tbl;

	struct ocrdma_eq *eq_tbl;
	int eq_cnt;
	u16 base_eqid;
	u16 max_eq;

	union ib_gid *sgid_tbl;
	/* provides synchronization to the sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;
	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	struct {
		struct ocrdma_av *va;
		dma_addr_t pa;
		u32 size;
		u32 num_ah;
		/* provides synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;
	struct ocrdma_mq mq;
	struct mqe_ctx mqe_ctx;

	struct be_dev_info nic_info;

	struct list_head entry;
	struct rcu_head rcu;
	int id;
	u64 stag_arr[OCRDMA_MAX_STAG];
	u16 pvid;
};

struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_cqe *va;
	u32 phase;
	u32 getp;	/* index of pending wrs to
			 * return to stack, wraps around
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool armed, solicited;
	bool arm_needed;

	spinlock_t cq_lock ____cacheline_aligned; /* provides synchronization
						   * to cq polling
						   */
	/* synchronizes the cq completion handler invoked from multiple contexts */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;
	u16 eqn;

	struct ocrdma_ucontext *ucontext;
	dma_addr_t pa;
	u32 len;

	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};

struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;
	u32 id;
	int num_dpp_qp;
	u32 dpp_page;
	bool dpp_enabled;
};

struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_av *av;
	u16 sgid_index;
	u32 id;
};
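
/*
 * mqe_ctx serializes mailbox (MQ) commands: a caller takes mqe_ctx.lock,
 * posts a single MQE, then sleeps on cmd_wait until the completion handler
 * sets cmd_done and fills in cqe_status/ext_status. A minimal sketch of
 * that wait pattern, assuming a hypothetical ocrdma_post_mqe() helper and
 * an arbitrary 30 s timeout (illustrative only):
 *
 *	mutex_lock(&dev->mqe_ctx.lock);
 *	dev->mqe_ctx.cmd_done = false;
 *	ocrdma_post_mqe(dev, mqe);			// hypothetical helper
 *	wait_event_timeout(dev->mqe_ctx.cmd_wait, dev->mqe_ctx.cmd_done,
 *			   msecs_to_jiffies(30000));
 *	status = dev->mqe_ctx.cqe_status;
 *	mutex_unlock(&dev->mqe_ctx.lock);
 */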

struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u32 max_sges;
	u32 head, tail;
	u32 entry_size;
	u32 max_cnt;
	u32 max_wqe_idx;
	u16 dbid;		/* qid, where to ring the doorbell. */
	u32 len;
	dma_addr_t pa;
};

struct ocrdma_srq {
	struct ib_srq ibsrq;
	u8 __iomem *db;
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	u32 *idx_bit_fields;
	u32 bit_fields_len;

	/* provides synchronization to multiple contexts posting rqes */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_pd *pd;
	u32 id;
};

struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	u8 __iomem *sq_db;
	struct ocrdma_qp_hwq_info sq;
	struct {
		uint64_t wrid;
		uint16_t dpp_wqe_idx;
		uint16_t dpp_wqe;
		uint8_t  signaled;
		uint8_t  rsvd[3];
	} *wqe_wr_id_tbl;
	u32 max_inline_data;

	/* provides synchronization to multiple contexts posting wqes and rqes */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	int cap_flags;
	u32 max_ord, max_ird;

	u32 id;
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;
	bool signaled;
	u16 db_cache;
};

struct ocrdma_hw_mr {
	u32 lkey;
	u8 fr_mr;
	u8 remote_atomic;
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;
	struct ocrdma_pbl *pbl_table;
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	u64 fbo;
	u64 va;
};

struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ocrdma_hw_mr hwmr;
};

struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;

	struct list_head mm_head;
	struct mutex mm_list_lock; /* protects list entries of mm type */
	struct ocrdma_pd *cntxt_pd;
	int pd_in_use;

	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};

struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};

static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}

static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
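
/*
 * The get_ocrdma_*() helpers recover the driver-private structure from the
 * embedded ib_* object handed back by the verbs core (a standard
 * container_of() pattern). A minimal sketch of how a verbs callback would
 * use them, assuming a hypothetical PD-teardown entry point (illustrative
 * only):
 *
 *	static int example_dealloc_pd(struct ib_pd *ibpd)
 *	{
 *		struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 *		struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
 *
 *		... release pd->id back to dev ...
 *		return 0;
 *	}
 */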

static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{
	return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
		 qp->id < 128) ? 24 : 16);
}

static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
	int cqe_valid;
	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
	return (cqe_valid == cq->phase);
}

static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_QTYPE) ? 0 : 1;
}

static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_IMM) ? 1 : 0;
}

static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}

#endif
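
/*
 * Usage note (illustrative only, not part of this header): CQ polling
 * relies on the valid/phase convention checked by is_cqe_valid(). The
 * consumer index getp wraps at max_hw_cqe, and the expected phase bit is
 * flipped on every wrap, so stale entries from the previous pass fail the
 * validity test. A minimal sketch of such a poll loop:
 *
 *	struct ocrdma_cqe *cqe = &cq->va[cq->getp];
 *
 *	while (is_cqe_valid(cq, cqe)) {
 *		... consume cqe, e.g. branch on is_cqe_for_sq(cqe) ...
 *		if (++cq->getp == cq->max_hw_cqe) {
 *			cq->getp = 0;
 *			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
 *		}
 *		cqe = &cq->va[cq->getp];
 *	}
 */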