/********************************************************************
 * This file is part of the Emulex RoCE Device Driver for           *
 * RoCE (RDMA over Converged Ethernet) adapters.                    *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.             *
 * EMULEX and SLI are trademarks of Emulex.                         *
 * www.emulex.com                                                   *
 *                                                                  *
 * This program is free software; you can redistribute it and/or    *
 * modify it under the terms of version 2 of the GNU General        *
 * Public License as published by the Free Software Foundation.     *
 * This program is distributed in the hope that it will be useful.  *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND           *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,   *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE       *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD  *
 * TO BE LEGALLY INVALID. See the GNU General Public License for    *
 * more details, a copy of which can be found in the file COPYING   *
 * included with this package.                                      *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 ********************************************************************/

#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <be_roce.h>
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u"

#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)"
#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"

#define OC_SKH_DEVICE_PF 0x720
#define OC_SKH_DEVICE_VF 0x728
#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
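/*
 * Usage sketch (illustrative only, not part of this header): the two
 * helper macros above are meant to be used as follows. OCRDMA_UVERBS()
 * ORs uverbs opcode bits into the command bitmap the driver advertises
 * to the IB core, and convert_to_64bit() glues a split low/high
 * register pair back into one 64-bit value:
 *
 *	dev->ibdev.uverbs_cmd_mask = OCRDMA_UVERBS(GET_CONTEXT) |
 *				     OCRDMA_UVERBS(QUERY_DEVICE) |
 *				     OCRDMA_UVERBS(QUERY_PORT);
 *
 *	u64 addr = convert_to_64bit(addr_lo, addr_hi);
 */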
struct ocrdma_dev_attr {
        u8 fw_ver[32];
        u32 vendor_id;
        u32 device_id;
        u16 max_pd;
        u16 max_cq;
        u16 max_cqe;
        u16 max_qp;
        u16 max_wqe;
        u16 max_rqe;
        u16 max_srq;
        u32 max_inline_data;
        int max_send_sge;
        int max_recv_sge;
        int max_srq_sge;
        int max_rdma_sge;
        int max_mr;
        u64 max_mr_size;
        u32 max_num_mr_pbl;
        int max_mw;
        int max_fmr;
        int max_map_per_fmr;
        int max_pages_per_frmr;
        u16 max_ord_per_qp;
        u16 max_ird_per_qp;

        int device_cap_flags;
        u8 cq_overflow_detect;
        u8 srq_supported;

        u32 wqe_size;
        u32 rqe_size;
        u32 ird_page_size;
        u8 local_ca_ack_delay;
        u8 ird;
        u8 num_ird_pages;
};

struct ocrdma_dma_mem {
        void *va;
        dma_addr_t pa;
        u32 size;
};

struct ocrdma_pbl {
        void *va;
        dma_addr_t pa;
};

struct ocrdma_queue_info {
        void *va;
        dma_addr_t dma;
        u32 size;
        u16 len;
        u16 entry_size;         /* size of an element in the queue */
        u16 id;                 /* qid, where to ring the doorbell */
        u16 head, tail;
        bool created;
};

struct ocrdma_eq {
        struct ocrdma_queue_info q;
        u32 vector;
        int cq_cnt;
        struct ocrdma_dev *dev;
        char irq_name[32];
};

struct ocrdma_mq {
        struct ocrdma_queue_info sq;
        struct ocrdma_queue_info cq;
        bool rearm_cq;
};

struct mqe_ctx {
        struct mutex lock;      /* for serializing mailbox commands on MQ */
        wait_queue_head_t cmd_wait;
        u32 tag;
        u16 cqe_status;
        u16 ext_status;
        bool cmd_done;
};
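/*
 * Sketch of how mqe_ctx is intended to be used (an assumption based on
 * the fields above, not a verbatim copy of the driver): a submitter
 * takes ->lock so only one mailbox command is outstanding, posts the
 * MQE, then sleeps on ->cmd_wait until the MQ completion handler sets
 * ->cmd_done and wakes it:
 *
 *	mutex_lock(&dev->mqe_ctx.lock);
 *	// post the command, stamped with dev->mqe_ctx.tag
 *	wait_event_timeout(dev->mqe_ctx.cmd_wait, dev->mqe_ctx.cmd_done,
 *			   msecs_to_jiffies(30000)); // timeout illustrative
 *	// inspect cqe_status/ext_status for the result
 *	mutex_unlock(&dev->mqe_ctx.lock);
 */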
struct ocrdma_hw_mr {
        u32 lkey;
        u8 fr_mr;
        u8 remote_atomic;
        u8 remote_rd;
        u8 remote_wr;
        u8 local_rd;
        u8 local_wr;
        u8 mw_bind;
        u8 rsvd;
        u64 len;
        struct ocrdma_pbl *pbl_table;
        u32 num_pbls;
        u32 num_pbes;
        u32 pbl_size;
        u32 pbe_size;
        u64 fbo;
        u64 va;
};

struct ocrdma_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct ocrdma_hw_mr hwmr;
};

struct ocrdma_stats {
        u8 type;
        struct ocrdma_dev *dev;
};

struct stats_mem {
        struct ocrdma_mqe mqe;
        void *va;
        dma_addr_t pa;
        u32 size;
        char *debugfs_mem;
};

struct phy_info {
        u16 auto_speeds_supported;
        u16 fixed_speeds_supported;
        u16 phy_type;
        u16 interface_type;
};

struct ocrdma_dev {
        struct ib_device ibdev;
        struct ocrdma_dev_attr attr;

        struct mutex dev_lock;  /* provides synchronized access to device data */
        spinlock_t flush_q_lock ____cacheline_aligned;

        struct ocrdma_cq **cq_tbl;
        struct ocrdma_qp **qp_tbl;

        struct ocrdma_eq *eq_tbl;
        int eq_cnt;
        u16 base_eqid;
        u16 max_eq;

        union ib_gid *sgid_tbl;
        /* provides synchronization to the sgid table when
         * gid entry updates are triggered by the notifier.
         */
        spinlock_t sgid_lock;

        int gsi_qp_created;
        struct ocrdma_cq *gsi_sqcq;
        struct ocrdma_cq *gsi_rqcq;

        struct {
                struct ocrdma_av *va;
                dma_addr_t pa;
                u32 size;
                u32 num_ah;
                /* provides synchronization for av
                 * entry allocations.
                 */
                spinlock_t lock;
                u32 ahid;
                struct ocrdma_pbl pbl;
        } av_tbl;

        void *mbx_cmd;
        struct ocrdma_mq mq;
        struct mqe_ctx mqe_ctx;

        struct be_dev_info nic_info;
        struct phy_info phy;
        char model_number[32];
        u32 hba_port_num;

        struct list_head entry;
        struct rcu_head rcu;
        int id;
        u64 stag_arr[OCRDMA_MAX_STAG];
        u16 pvid;
        u32 asic_id;

        ulong last_stats_time;
        struct mutex stats_lock;        /* provides synchronization for debugfs operations */
        struct stats_mem stats_mem;
        struct ocrdma_stats rsrc_stats;
        struct ocrdma_stats rx_stats;
        struct ocrdma_stats wqe_stats;
        struct ocrdma_stats tx_stats;
        struct ocrdma_stats db_err_stats;
        struct ocrdma_stats tx_qp_err_stats;
        struct ocrdma_stats rx_qp_err_stats;
        struct ocrdma_stats tx_dbg_stats;
        struct ocrdma_stats rx_dbg_stats;
        struct dentry *dir;
};

struct ocrdma_cq {
        struct ib_cq ibcq;
        struct ocrdma_cqe *va;
        u32 phase;
        u32 getp;       /* get pointer: index of pending WRs to return
                         * to the stack; wraps around at max_hw_cqe
                         */
        u32 max_hw_cqe;
        bool phase_change;
        bool deferred_arm, deferred_sol;
        bool first_arm;

        /* provides synchronization for cq polling */
        spinlock_t cq_lock ____cacheline_aligned;
        /* synchronizes the cq completion handler invoked from multiple contexts */
        spinlock_t comp_handler_lock ____cacheline_aligned;
        u16 id;
        u16 eqn;

        struct ocrdma_ucontext *ucontext;
        dma_addr_t pa;
        u32 len;
        u32 cqe_cnt;

        /* head of all qp's sq and rq for which cqes need to be flushed
         * by the software.
         */
        struct list_head sq_head, rq_head;
};
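/*
 * Illustrative sketch (an assumption derived from the getp/phase
 * fields above): the CQ is consumed ring-style, and when getp wraps at
 * max_hw_cqe the expected phase bit flips, which is what lets
 * is_cqe_valid() below tell newly DMA'd CQEs apart from stale ones:
 *
 *	if (++cq->getp == cq->max_hw_cqe) {
 *		cq->getp = 0;
 *		cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
 *	}
 */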
struct ocrdma_pd {
        struct ib_pd ibpd;
        struct ocrdma_ucontext *uctx;
        u32 id;
        int num_dpp_qp;
        u32 dpp_page;
        bool dpp_enabled;
};

struct ocrdma_ah {
        struct ib_ah ibah;
        struct ocrdma_av *av;
        u16 sgid_index;
        u32 id;
};

struct ocrdma_qp_hwq_info {
        u8 *va;                 /* virtual address */
        u32 max_sges;
        u32 head, tail;
        u32 entry_size;
        u32 max_cnt;
        u32 max_wqe_idx;
        u16 dbid;               /* qid, where to ring the doorbell */
        u32 len;
        dma_addr_t pa;
};

struct ocrdma_srq {
        struct ib_srq ibsrq;
        u8 __iomem *db;
        struct ocrdma_qp_hwq_info rq;
        u64 *rqe_wr_id_tbl;
        u32 *idx_bit_fields;
        u32 bit_fields_len;

        /* provides synchronization for multiple contexts posting RQEs */
        spinlock_t q_lock ____cacheline_aligned;

        struct ocrdma_pd *pd;
        u32 id;
};

struct ocrdma_qp {
        struct ib_qp ibqp;
        struct ocrdma_dev *dev;

        u8 __iomem *sq_db;
        struct ocrdma_qp_hwq_info sq;
        struct {
                uint64_t wrid;
                uint16_t dpp_wqe_idx;
                uint16_t dpp_wqe;
                uint8_t  signaled;
                uint8_t  rsvd[3];
        } *wqe_wr_id_tbl;
        u32 max_inline_data;

        /* provides synchronization for multiple contexts posting WQEs and RQEs */
        spinlock_t q_lock ____cacheline_aligned;
        struct ocrdma_cq *sq_cq;
        /* list maintained per CQ to flush SQ errors */
        struct list_head sq_entry;

        u8 __iomem *rq_db;
        struct ocrdma_qp_hwq_info rq;
        u64 *rqe_wr_id_tbl;
        struct ocrdma_cq *rq_cq;
        struct ocrdma_srq *srq;
        /* list maintained per CQ to flush RQ errors */
        struct list_head rq_entry;

        enum ocrdma_qp_state state;     /* QP state */
        int cap_flags;
        u32 max_ord, max_ird;

        u32 id;
        struct ocrdma_pd *pd;

        enum ib_qp_type qp_type;

        int sgid_idx;
        u32 qkey;
        bool dpp_enabled;
        u8 *ird_q_va;
        bool signaled;
};

struct ocrdma_ucontext {
        struct ib_ucontext ibucontext;

        struct list_head mm_head;
        struct mutex mm_list_lock;      /* protects list entries of mm type */
        struct ocrdma_pd *cntxt_pd;
        int pd_in_use;

        struct {
                u32 *va;
                dma_addr_t pa;
                u32 len;
        } ah_tbl;
};

struct ocrdma_mm {
        struct {
                u64 phy_addr;
                unsigned long len;
        } key;
        struct list_head entry;
};

static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
                                                          *ibucontext)
{
        return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}

static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
        return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
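/*
 * Usage sketch (illustrative): a verbs entry point receives the
 * embedded ib_* object and uses one of the container_of() accessors
 * above to recover the driver-private structure, e.g.:
 *
 *	int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries,
 *			   struct ib_wc *wc)
 *	{
 *		struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 *		struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
 *		...
 *	}
 */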
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
        int cqe_valid;
        cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
        return (cqe_valid == cq->phase);
}

static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_QTYPE) ? 0 : 1;
}

static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_IMM) ? 1 : 0;
}

static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}

static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
                                      struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
        struct in6_addr in6;

        memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
        if (rdma_is_multicast_addr(&in6))
                rdma_get_mcast_mac(&in6, mac_addr);
        else
                memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
        return 0;
}

static inline char *hca_name(struct ocrdma_dev *dev)
{
        switch (dev->nic_info.pdev->device) {
        case OC_SKH_DEVICE_PF:
        case OC_SKH_DEVICE_VF:
                return OC_NAME_SH;
        default:
                return OC_NAME_UNKNOWN;
        }
}

static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
                                            int eqid)
{
        int indx;

        for (indx = 0; indx < dev->eq_cnt; indx++) {
                if (dev->eq_tbl[indx].q.id == eqid)
                        return indx;
        }

        return -EINVAL;
}

static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
{
        if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
                pci_read_config_dword(dev->nic_info.pdev,
                                      OCRDMA_SLI_ASIC_ID_OFFSET,
                                      &dev->asic_id);
        }

        return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
                                OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
}

#endif  /* __OCRDMA_H__ */