/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>	/* rdma_is_multicast_addr(), rdma_get_mcast_mac() */

#include <be_roce.h>
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

struct ocrdma_dev_attr {
	u8 fw_ver[32];
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;
	u16 max_cq;
	u16 max_cqe;
	u16 max_qp;
	u16 max_wqe;
	u16 max_rqe;
	u16 max_srq;
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_srq_sge;
	int max_rdma_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;
	u16 max_ird_per_qp;

	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;

	u32 wqe_size;
	u32 rqe_size;
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
};

struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};

struct ocrdma_queue_info {
	void *va;
	dma_addr_t dma;
	u32 size;
	u16 len;
	u16 entry_size;		/* size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell */
	u16 head, tail;
	bool created;
};
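/*
 * Illustrative only: consumers of ocrdma_queue_info typically advance
 * 'head'/'tail' with wraparound against the queue length. A minimal
 * sketch of such a helper (the name is hypothetical, not part of this
 * driver's API; it assumes the queue length is a power of two):
 *
 *	static inline void ocrdma_example_inc_tail(struct ocrdma_queue_info *q)
 *	{
 *		q->tail = (q->tail + 1) & (q->len - 1);
 *	}
 */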
struct ocrdma_eq {
	struct ocrdma_queue_info q;
	u32 vector;
	int cq_cnt;
	struct ocrdma_dev *dev;
	char irq_name[32];
};

struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	bool rearm_cq;
};

struct mqe_ctx {
	struct mutex lock;	/* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;
	u32 tag;
	u16 cqe_status;
	u16 ext_status;
	bool cmd_done;
};

struct ocrdma_hw_mr {
	u32 lkey;
	u8 fr_mr;
	u8 remote_atomic;
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;
	struct ocrdma_pbl *pbl_table;
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	u64 fbo;
	u64 va;
};

struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ocrdma_hw_mr hwmr;
};

struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock;	/* provides synchronized access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;
	struct ocrdma_qp **qp_tbl;

	struct ocrdma_eq *eq_tbl;
	int eq_cnt;
	u16 base_eqid;
	u16 max_eq;

	union ib_gid *sgid_tbl;
	/* provides synchronization to the sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;
	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	struct {
		struct ocrdma_av *va;
		dma_addr_t pa;
		u32 size;
		u32 num_ah;
		/* provides synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;
	struct ocrdma_mq mq;
	struct mqe_ctx mqe_ctx;

	struct be_dev_info nic_info;

	struct list_head entry;
	struct rcu_head rcu;
	int id;
	struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
	u16 pvid;
};

struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_cqe *va;
	u32 phase;
	u32 getp;	/* pointer to pending wrs to
			 * return to stack; wraps around
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool armed, solicited;
	bool arm_needed;

	spinlock_t cq_lock ____cacheline_aligned;	/* provides synchronization
							 * for cq polling
							 */
	/* synchronizes the cq completion handler invoked from multiple contexts */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;
	u16 eqn;

	struct ocrdma_ucontext *ucontext;
	dma_addr_t pa;
	u32 len;

	/* head of all qp's sq and rq lists for which cqes need to be
	 * flushed by software.
	 */
	struct list_head sq_head, rq_head;
};

struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;
	u32 id;
	int num_dpp_qp;
	u32 dpp_page;
	bool dpp_enabled;
};

struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_av *av;
	u16 sgid_index;
	u32 id;
};

struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u32 max_sges;
	u32 head, tail;
	u32 entry_size;
	u32 max_cnt;
	u32 max_wqe_idx;
	u16 dbid;		/* qid, where to ring the doorbell */
	u32 len;
	dma_addr_t pa;
};
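/*
 * Illustrative only: the number of free entries in a hardware work queue
 * tracked by ocrdma_qp_hwq_info can be derived from 'head' and 'tail'.
 * A minimal sketch (hypothetical helper name, not this driver's API):
 *
 *	static inline u32 ocrdma_example_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
 *	{
 *		return (q->tail + q->max_cnt - q->head - 1) % q->max_cnt;
 *	}
 *
 * The '- 1' keeps one slot unused so that a full queue remains
 * distinguishable from an empty one.
 */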
struct ocrdma_srq {
	struct ib_srq ibsrq;
	u8 __iomem *db;
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	u32 *idx_bit_fields;
	u32 bit_fields_len;

	/* provides synchronization for multiple contexts posting rqes */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_pd *pd;
	u32 id;
};

struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	u8 __iomem *sq_db;
	struct ocrdma_qp_hwq_info sq;
	struct {
		uint64_t wrid;
		uint16_t dpp_wqe_idx;
		uint16_t dpp_wqe;
		uint8_t  signaled;
		uint8_t  rsvd[3];
	} *wqe_wr_id_tbl;
	u32 max_inline_data;

	/* provides synchronization for multiple contexts posting wqes, rqes */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	int cap_flags;
	u32 max_ord, max_ird;

	u32 id;
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;
	bool signaled;
	u16 db_cache;
};

struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;

	struct list_head mm_head;
	struct mutex mm_list_lock;	/* protects list entries of mm type */
	struct ocrdma_pd *cntxt_pd;
	int pd_in_use;

	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};

struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};

static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}

static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}

/* bit position of the num-posted field in the doorbell register:
 * GEN2 devices use bit 24 for the first 128 QPs, bit 16 otherwise.
 */
static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{
	return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
		 qp->id < 128) ? 24 : 16);
}

static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
	int cqe_valid;

	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
	return (cqe_valid == cq->phase);
}

static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_QTYPE) ? 0 : 1;
}

static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_IMM) ? 1 : 0;
}

static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}
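/*
 * Illustrative only: a CQ poller typically walks the CQE ring using the
 * phase bit to detect new entries, flipping cq->phase on wraparound. A
 * minimal sketch under those assumptions (not this driver's actual
 * polling logic):
 *
 *	while (is_cqe_valid(cq, cqe)) {
 *		... consume cqe ...
 *		if (++cq->getp == cq->max_hw_cqe) {
 *			cq->getp = 0;
 *			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
 *		}
 *		cqe = cq->va + cq->getp;
 *	}
 */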
/* resolve the destination MAC for an address handle: multicast GIDs map
 * to multicast MACs; otherwise the DMAC supplied in the AH attributes
 * is used as-is.
 */
static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
				      struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
	struct in6_addr in6;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6))
		rdma_get_mcast_mac(&in6, mac_addr);
	else
		memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
	return 0;
}

#endif	/* __OCRDMA_H__ */