/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
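
/*
 * Usage note (illustrative, not part of the original header): each wrapper
 * above stamps its message with the IB device name, the calling function,
 * the source line and the current pid.  For example,
 *
 *	mlx5_ib_warn(dev, "umr failed, status %d\n", status);
 *
 * is emitted via pr_warn() as roughly
 *
 *	"mlx5_0:some_func:123:(pid 4567): umr failed, status -5"
 *
 * where "status" is a hypothetical local variable at the call site.
 */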

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1, /* always last */
};
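
/*
 * Sketch (hypothetical helpers mirroring the constants above, not declared
 * by this header): an mmap page offset carries the command in the bits at
 * and above MLX5_IB_MMAP_CMD_SHIFT and a command argument in the bits
 * below it:
 *
 *	static inline int get_command(unsigned long offset)
 *	{
 *		return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 *	}
 *
 *	static inline int get_arg(unsigned long offset)
 *	{
 *		return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
 *	}
 */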

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u32			pa_lkey;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u32			doorbell_qpn;
	u8			sq_signal_bits;
	u8			fm_cache;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx5_ib_wq	sq;

	struct ib_umem	       *umem;
	int			buf_size;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			state;
	int			mlx_type;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;
	u32			pa_lkey;

	/* Store signature errors */
	bool			signature_en;
};
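
/*
 * Illustrative sketch: mlx5_ib_qp embeds its ib_qp, so a verbs entry point
 * that receives the core object recovers the driver object with
 * container_of() (wrapped as to_mqp() later in this file) and takes
 * qp->mutex around state changes.  example_modify_qp() is hypothetical;
 * the real entry points are declared at the bottom of this header:
 *
 *	static int example_modify_qp(struct ib_qp *ibqp)
 *	{
 *		struct mlx5_ib_qp *qp =
 *			container_of(ibqp, struct mlx5_ib_qp, ibqp);
 *
 *		mutex_lock(&qp->mutex);
 *		... modify hardware QP state here ...
 *		mutex_unlock(&qp->mutex);
 *		return 0;
 *	}
 */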

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem	       *umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
};

struct mlx5_shared_mr_info {
	int			mr_id;
	struct ib_umem	       *umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	__be64		       *pas;
	dma_addr_t		dma;
	int			npages;
	struct completion	done;
	enum ib_wc_status	status;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
};

struct mlx5_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64		       *mapped_page_list;
	dma_addr_t		map;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	struct ib_mr	*mr;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_ib_fmr {
	struct ib_fmr			ibfmr;
	struct mlx5_core_mr		mr;
	int				access_flags;
	int				state;
	/* protect fmr state
	 */
	spinlock_t			lock;
	u64				wrid;
	struct ib_send_wr		wr[2];
	u8				page_shift;
	struct ib_fast_reg_page_list	page_list;
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry	       *root;
	unsigned long		last_add;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		mdev;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	struct list_head		eqs_list;
	int				num_ports;
	int				num_comp_vectors;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	spinlock_t			mr_lock;
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
{
	return container_of(dev, struct mlx5_ib_dev, mdev);
}

static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
{
	return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
}
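
/*
 * Illustrative example: because mlx5_core_dev is embedded in mlx5_ib_dev,
 * the two helpers above let callers hop from a PCI device to the IB device
 * registered on top of it:
 *
 *	struct mlx5_ib_dev *ibdev = mlx5_pci2ibdev(pdev);
 *
 *	mlx5_ib_dbg(ibdev, "ports %d\n", ibdev->num_ports);
 *
 * This is only valid for a pci_dev bound to the mlx5 core driver; for any
 * other device the container_of() arithmetic yields garbage.
 */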

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
			   struct mlx5_ib_ah *ah);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
				int vector, struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
				struct ib_mr_init_attr *mr_init_attr);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr);
int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova);
int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int umr);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
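
/*
 * Illustrative sketch (hypothetical call site, not an exported interface):
 * when registering user memory, mlx5_ib_cont_pages() reports how the umem
 * can be described (page count, usable page shift, number of contiguous
 * chunks, cache order) and mlx5_ib_populate_pas() then fills the physical
 * address array handed to the firmware:
 *
 *	int npages, page_shift, ncont, order;
 *
 *	mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
 *	mlx5_ib_populate_pas(dev, umem, page_shift, pas, 1);
 *
 * Here "umem", "start", "dev" and "pas" are assumed to come from the
 * surrounding registration path.
 */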

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

#endif /* MLX5_IB_H */