/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
	pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
	pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	       __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
	pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
		__LINE__, current->pid, ##arg)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1, /* always last */
};
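/*
 * Illustrative helper, not part of the original header: assuming the mmap
 * command is carried in the page-offset bits selected by the shift/mask
 * above, an mmap handler could recover it from vma->vm_pgoff like this.
 */
static inline unsigned long mlx5_ib_mmap_cmd_sketch(unsigned long pgoff)
{
	return (pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}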
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
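/*
 * Usage sketch (illustrative only): the uverbs core hands callbacks a
 * generic ib_ucontext; because it is embedded in mlx5_ib_ucontext,
 * container_of() recovers the enclosing driver-private object:
 *
 *	struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 *
 *	mutex_lock(&ctx->db_page_mutex);
 *	... walk ctx->db_page_list ...
 *	mutex_unlock(&ctx->db_page_mutex);
 *
 * The same pattern applies to every to_m*() helper in this file.
 */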
struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u32			pa_lkey;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u32			doorbell_qpn;
	u8			sq_signal_bits;
	u8			fm_cache;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx5_ib_wq	sq;

	struct ib_umem	       *umem;
	int			buf_size;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			state;
	int			mlx_type;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;
	u32			pa_lkey;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem	       *umem;
	int			cqe_size;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
};
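/*
 * Sketch of how these bits are typically consumed at QP creation time
 * (an assumption about the .c side, not taken from this header):
 *
 *	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
 *		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 */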
struct mlx5_shared_mr_info {
	int			mr_id;
	struct ib_umem	       *umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_resize *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	__be64		       *pas;
	dma_addr_t		dma;
	int			npages;
	struct completion	done;
	enum ib_wc_status	status;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	unsigned long		start;
};

struct mlx5_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;
	dma_addr_t			map;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	struct ib_mr	*mr;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_ib_fmr {
	struct ib_fmr			ibfmr;
	struct mlx5_core_mr		mr;
	int				access_flags;
	int				state;
	/* protect fmr state
	 */
	spinlock_t			lock;
	u64				wrid;
	struct ib_send_wr		wr[2];
	u8				page_shift;
	struct ib_fast_reg_page_list	page_list;
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry	       *root;
	unsigned long		last_add;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		mdev;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	struct list_head		eqs_list;
	int				num_ports;
	int				num_comp_vectors;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	spinlock_t			mr_lock;
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
{
	return container_of(dev, struct mlx5_ib_dev, mdev);
}

static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
{
	return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
}
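/*
 * Illustrative chain (not from the original file): because mdev is embedded
 * in mlx5_ib_dev, code that only holds a PCI or core device can reach the
 * IB device, e.g. a hypothetical error handler:
 *
 *	struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);
 *
 *	mlx5_ib_warn(dev, "device error on port %d\n", port);
 */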
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
			   struct mlx5_ib_ah *ah);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
				int vector, struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr);
int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova);
int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int umr);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
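/*
 * Usage sketch (illustrative, following the common mlx4/mlx5 pattern rather
 * than anything stated in this header): a port query builds a SubnGet MAD
 * with init_query_mad() above and issues it via mlx5_MAD_IFC():
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 *	in_mad->attr_mod = cpu_to_be32(port);
 *	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
 */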
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

#endif /* MLX5_IB_H */