/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"

#define mlx5_ib_dbg(_dev, format, arg...)				\
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)				\
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)				\
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		 __LINE__, current->pid, ##arg)

#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

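/*
 * Illustrative note (added, not part of the original header): field_avail()
 * is the usual guard for optional ABI extensions. It checks that a user
 * command buffer of 'sz' bytes is large enough to carry 'fld'; for example,
 * get_qp_user_index() below only trusts ucmd->uidx when
 * field_avail(struct mlx5_ib_create_qp, uidx, inlen) holds.
 */
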
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES		= 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK	= 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};

#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)					\
	(MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;

	u64 lib_caps;
	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
	u16 devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u16 uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1

struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_handle *rule;
	struct ib_counters *ibcounters;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_flow_matcher *flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int mask_len;
	enum mlx5_ib_flow_type flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16 priority;
	struct mlx5_core_dev *mdev;
	atomic_t usecnt;
	u8 match_criteria_enable;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio fdb;
	struct mlx5_flow_table *lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * Only a single add/removal of a flow steering rule can be
	 * done simultaneously.
	 */
	struct mutex lock;
};

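/*
 * Illustrative note (added; derived from the defines above): prios[] and
 * egress_prios[] hold one flow table per bypass priority, with the regular
 * priorities 0..MLX5_IB_FLOW_LAST_PRIO first and MLX5_IB_FLOW_MCAST_PRIO
 * and MLX5_IB_FLOW_LEFTOVERS_PRIO at the tail of the array.
 */
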
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR		(IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR		(IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE		(IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT		(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD		16
#define MLX5_IB_UMR_XLT_ALIGNMENT	64

#define MLX5_IB_UPD_XLT_ZAP		BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE		BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC		BIT(2)
#define MLX5_IB_UPD_XLT_ADDR		BIT(3)
#define MLX5_IB_UPD_XLT_PD		BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS		BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT	BIT(6)

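/*
 * Illustrative note (added): the MLX5_IB_UPD_XLT_* bits are OR'ed together
 * into the 'flags' argument of mlx5_ib_update_xlt(), declared below, to
 * select which parts of an MR's translation table (XLT) a UMR operation
 * updates.
 */
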
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}

struct wr_list {
	u16 opcode;
	u16 next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING		= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
	unsigned *wqe_head;
	u16 unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
	u16 cur_post;
	void *cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP	= 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ	= 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
	struct mlx5_core_qp core_qp;
	u32 rq_num_pas;
	u32 log_rq_stride;
	u32 log_rq_size;
	u32 rq_page_offset;
	u32 log_page_size;
	u32 log_num_strides;
	u32 two_byte_shift_en;
	u32 single_stride_log_num_of_bytes;
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
	int create_type;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
	u32 wqe_shift;
	int wq_sig;
	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
	u16 uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
	int buf_size;
	u64 buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
	u16 xrcdn;
	u8 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
};

struct mlx5_ib_rss_qp {
	u32 tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tirn;
	u8 state;
	u32 flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	struct mlx5_flow_handle *flow_rule;
	u32 tisn;
	u8 state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int buf_size;
	unsigned long offset;
	struct mlx5_sq_bfreg *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct mdct;
	u32 *in;
};

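/*
 * Descriptive note (added): struct mlx5_ib_qp below wraps one of several
 * hardware objects depending on the QP type: a transport QP (trans_qp), a
 * raw packet QP built from an SQ/TIS and RQ/TIR pair (raw_packet_qp), an
 * RSS TIR (rss_qp), or a DCT (dct).
 */
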
struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf buf;

	struct mlx5_db db;
	struct mlx5_ib_wq rq;

	u8 sq_signal_bits;
	u8 next_fence;
	struct mlx5_ib_wq sq;

	/* serialize qp state modifications
	 */
	struct mutex mutex;
	u32 flags;
	u8 port;
	u8 state;
	int wq_sig;
	int scat_cqe;
	int max_inline_data;
	struct mlx5_bf bf;
	int has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int bfregn;

	int create_type;

	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	struct mlx5_rate_limit rl;
	u32 underlay_qpn;
	u32 flags_en;
	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type qp_sub_type;
	/* A flag to indicate that a new counter has been configured but
	 * has not yet taken effect
	 */
	u32 counter_pending;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
	MLX5_IB_QP_PACKET_BASED_CREDIT		= 1 << 13,
};

struct mlx5_umr_wr {
	struct ib_send_wr wr;
	u64 virt_addr;
	u64 offset;
	struct ib_pd *pd;
	unsigned int page_shift;
	unsigned int xlt_size;
	u64 length;
	int access_flags;
	u32 mkey;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem *umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq ibcq;
	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	struct mlx5_db db;

	/* serialize access to the CQ
	 */
	spinlock_t lock;

	/* protect resize cq
	 */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;
	int cqe_size;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	u32 create_flags;
	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
	u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	/* protect SRQ handling
	 */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming a SRQ
	 */
	struct mutex mutex;
	int wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_ib_dm {
	struct ib_dm ibdm;
	phys_addr_t dev_addr;
	u32 type;
	size_t size;
	union {
		struct {
			u32 obj_id;
		} icm_dm;
		/* other dm types specific params should be added here */
	};
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS	(IB_ACCESS_LOCAL_WRITE |\
					 IB_ACCESS_REMOTE_WRITE |\
					 IB_ACCESS_REMOTE_READ |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
					  IB_ACCESS_REMOTE_WRITE |\
					  IB_ACCESS_REMOTE_READ |\
					  IB_ZERO_BASED)

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	void *descs;
	dma_addr_t desc_map;
	int ndescs;
	int data_length;
	int meta_ndescs;
	int meta_length;
	int max_descs;
	int desc_size;
	int access_mode;
	struct mlx5_core_mkey mmkey;
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;
	int order;
	bool allocated_from_cache;
	int npages;
	struct mlx5_ib_dev *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;
	int live;
	void *descs_alloc;
	int access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr *parent;
	/* Needed for IB_MR_TYPE_INTEGRITY */
	struct mlx5_ib_mr *pi_mr;
	struct mlx5_ib_mr *klm_mr;
	struct mlx5_ib_mr *mtt_mr;
	u64 data_iova;
	u64 pi_iova;

	atomic_t num_leaf_free;
	wait_queue_head_t q_leaf_free;
	struct mlx5_async_work cb_work;
	atomic_t num_pending_prefetch;
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_core_mkey mmkey;
	int ndescs;
};

struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey mmkey;
	int ndescs;
	struct rcu_head rcu;
};

struct mlx5_ib_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

struct umr_common {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	/* control access to UMR QP
	 */
	struct semaphore sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
	 */
	spinlock_t lock;

	char name[4];
	u32 order;
	u32 xlt;
	u32 access_mode;
	u32 page;

	u32 size;
	u32 cur;
	u32 miss;
	u32 limit;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
	int pending;
	struct completion compl;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
	int stopped;
	struct dentry *root;
	unsigned long last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
	struct ib_xrcd *x0;
	struct ib_xrcd *x1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u16 set_id;
	bool set_id_valid;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t netdev_lock;
	struct net_device *netdev;
	struct notifier_block nb;
	atomic_t tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev *dev;
	u8 native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep *rep;
};

struct mlx5_ib_dbg_param {
	int offset;
	struct mlx5_ib_dev *dev;
	struct dentry *dentry;
	u8 port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry *root;
	struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_dbg_delay_drop {
	struct dentry *dir_debugfs;
	struct dentry *rqs_cnt_debugfs;
	struct dentry *events_cnt_debugfs;
	struct dentry *timeout_debugfs;
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev *dev;
	struct work_struct delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex lock;
	u32 timeout;
	bool activate;
	atomic_t events_cnt;
	atomic_t rqs_cnt;
	struct mlx5_ib_dbg_delay_drop *dbg;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

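/*
 * Illustrative note (added; the real profile tables live in the driver's .c
 * files, and the callback names here are placeholders): a profile is built
 * by populating the stage array with STAGE_CREATE, e.g.
 *
 *	static const struct mlx5_ib_profile some_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     some_stage_init_fn, some_stage_cleanup_fn),
 *		...
 *	};
 *
 * __mlx5_ib_add(), declared below, runs the init callbacks in stage order,
 * and __mlx5_ib_remove() runs the cleanup callbacks in reverse.
 */
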
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action ib_action;
	union {
		struct {
			u64 ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			u32 action_id;
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
	unsigned long *steering_sw_icm_alloc_blocks;
	unsigned long *header_modify_sw_icm_alloc_blocks;
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);

struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex mutex;
	u32 user_td;
	int qps;
	bool enabled;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

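/*
 * Descriptive note (added): the per-device state of the driver. It embeds
 * the ib_device registered with the RDMA core and points at the
 * mlx5_core_dev shared with the mlx5 Ethernet driver.
 */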
struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	int num_ports;
	/* serialize update of capability mask
	 */
	struct mutex cap_mask_mutex;
	bool ib_active;
	struct umr_common umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources devr;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
	int fill_delay;
	struct ib_odp_caps odp_caps;
	u64 odp_max_size;
	struct mlx5_ib_pf_eq odp_pf_eq;

	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct mr_srcu;
	u32 null_mkey;
	struct mlx5_ib_flow_db *flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_ib_delay_drop delay_drop;
	const struct mlx5_ib_profile *profile;
	bool is_rep;
	int lag_active;

	struct mlx5_ib_lb_state lb;
	u8 umr_fence;
	struct list_head ib_dev_list;
	u64 sys_image_guid;
	struct mlx5_dm dm;
	u16 devx_whitelist_uid;
	struct mlx5_srq_table srq_table;
	struct mlx5_async_ctx async_ctx;
	struct mlx5_devx_event_table devx_event_table;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

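/*
 * Illustrative note (added): the to_*() helpers above recover the driver's
 * wrapper struct from the embedded core object via container_of(), e.g. in
 * a verb handler:
 *
 *	struct mlx5_ib_cq *cq = to_mcq(ibcq);
 */
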
static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
			struct ib_udata *udata, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			     int buflen, size_t *bc);
int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			     int buflen, size_t *bc);
int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
			      void *buffer, int buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     struct ib_udata *udata,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp,
					    unsigned long start,
					    unsigned long end) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_context *flow_context,
	struct mlx5_flow_act *flow_act, u32 counter_id,
	void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    bool is_user) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
					     int *dest_type)
{
	return false;
}
static inline void
mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
	return;
}
#endif

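/*
 * Descriptive note (added): initialize a subnet-management MAD for an
 * attribute Get query; callers typically set attr_id and attr_mod
 * afterwards.
 */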
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
#endif /* MLX5_IB_H */