/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include <be_roce.h>
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

struct ocrdma_dev_attr {
	u8 fw_ver[32];
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;
	u16 max_cq;
	u16 max_cqe;
	u16 max_qp;
	u16 max_wqe;
	u16 max_rqe;
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_srq_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;
	u16 max_ird_per_qp;

	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;

	u32 wqe_size;
	u32 rqe_size;
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
};

struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};

struct ocrdma_queue_info {
	void *va;
	dma_addr_t dma;
	u32 size;
	u16 len;
	u16 entry_size;		/* size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell. */
	u16 head, tail;
	bool created;
};
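/*
 * Illustrative sketch only, an assumption rather than a driver API:
 * queues described by ocrdma_queue_info are fixed-size rings, with
 * 'head' and 'tail' tracking the producer and consumer positions.
 * Assuming 'len' is the entry count, a hypothetical helper for stepping
 * a ring index with wrap-around might look like this:
 */
static inline u16 ocrdma_example_ring_next(struct ocrdma_queue_info *q,
					   u16 idx)
{
	/* advance one entry, wrapping back to slot 0 at the end of the ring */
	return (idx + 1) % q->len;
}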
struct ocrdma_eq {
	struct ocrdma_queue_info q;
	u32 vector;
	int cq_cnt;
	struct ocrdma_dev *dev;
	char irq_name[32];
};

struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	bool rearm_cq;
};

struct mqe_ctx {
	struct mutex lock;	/* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;
	u32 tag;
	u16 cqe_status;
	u16 ext_status;
	bool cmd_done;
};

struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock;	/* provides synchronized access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;
	struct ocrdma_qp **qp_tbl;

	struct ocrdma_eq meq;
	struct ocrdma_eq *qp_eq_tbl;
	int eq_cnt;
	u16 base_eqid;
	u16 max_eq;

	union ib_gid *sgid_tbl;
	/* provides synchronization to the sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;
	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	struct {
		struct ocrdma_av *va;
		dma_addr_t pa;
		u32 size;
		u32 num_ah;
		/* provides synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;
	struct ocrdma_mq mq;
	struct mqe_ctx mqe_ctx;

	struct be_dev_info nic_info;

	struct list_head entry;
	struct rcu_head rcu;
	int id;
};

struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_dev *dev;
	struct ocrdma_cqe *va;
	u32 phase;
	u32 getp;	/* pointer to pending wrs to
			 * return to stack; wraps around
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool armed, solicited;
	bool arm_needed;

	spinlock_t cq_lock ____cacheline_aligned; /* provides synchronization
						   * for cq polling
						   */
	/* synchronizes the cq completion handler invoked from multiple contexts */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;
	u16 eqn;

	struct ocrdma_ucontext *ucontext;
	dma_addr_t pa;
	u32 len;

	/* list heads of all qp's sq and rq for which cqes need to be
	 * flushed by software.
	 */
	struct list_head sq_head, rq_head;
};

struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;
	u32 id;
	int num_dpp_qp;
	u32 dpp_page;
	bool dpp_enabled;
};

struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_dev *dev;
	struct ocrdma_av *av;
	u16 sgid_index;
	u32 id;
};

struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u32 max_sges;
	u32 head, tail;
	u32 entry_size;
	u32 max_cnt;
	u32 max_wqe_idx;
	u16 dbid;		/* qid, where to ring the doorbell. */
	u32 len;
	dma_addr_t pa;
};
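/*
 * Illustrative sketch only, an assumption rather than a driver API: with
 * 'head' as the producer index and 'tail' as the consumer index on a ring
 * of max_cnt entries, the number of occupied work-queue slots could be
 * derived as below.
 */
static inline u32 ocrdma_example_hwq_used(struct ocrdma_qp_hwq_info *q)
{
	/* account for wrap-around when head has cycled past the ring end */
	return (q->head >= q->tail) ? (q->head - q->tail) :
				      (q->max_cnt - q->tail + q->head);
}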
struct ocrdma_srq {
	struct ib_srq ibsrq;
	struct ocrdma_dev *dev;
	u8 __iomem *db;
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	u32 *idx_bit_fields;
	u32 bit_fields_len;

	/* provides synchronization to multiple contexts posting rqes */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_pd *pd;
	u32 id;
};

struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	u8 __iomem *sq_db;
	struct ocrdma_qp_hwq_info sq;
	struct {
		u64 wrid;
		u16 dpp_wqe_idx;
		u16 dpp_wqe;
		u8 signaled;
		u8 rsvd[3];
	} *wqe_wr_id_tbl;
	u32 max_inline_data;

	/* provides synchronization to multiple contexts posting wqes, rqes */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	int cap_flags;
	u32 max_ord, max_ird;

	u32 id;
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;
};

struct ocrdma_hw_mr {
	struct ocrdma_dev *dev;
	u32 lkey;
	u8 fr_mr;
	u8 remote_atomic;
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;
	struct ocrdma_pbl *pbl_table;
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	u64 fbo;
	u64 va;
};

struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ocrdma_hw_mr hwmr;
	struct ocrdma_pd *pd;
};

struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct ocrdma_dev *dev;

	struct list_head mm_head;
	struct mutex mm_list_lock;	/* protects list entries of mm type */
	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};

struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};

static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}

static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
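/*
 * Usage sketch (hypothetical caller, for illustration only): verbs entry
 * points receive generic ib_* objects and recover the driver-private
 * wrappers with the container_of() accessors above, e.g.
 *
 *	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
 *
 * The CQE helpers below follow a phase-bit convention: the valid bit in
 * cqe->flags_status_srcqpn is compared against the software-maintained
 * cq->phase, which is toggled when the CQ ring wraps, so stale entries
 * left over from the previous pass fail the comparison.
 */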
static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{
	/* shift of the num-posted field in the doorbell register */
	return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
		 qp->id < 64) ? 24 : 16);
}

static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
	int cqe_valid;
	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
	return (cqe_valid == cq->phase) ? 1 : 0;
}

static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_QTYPE) ? 0 : 1;
}

static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_IMM) ? 1 : 0;
}

static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}

#endif /* __OCRDMA_H__ */