/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

static __always_inline unsigned long
__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
			       unsigned int pgsz_shift)
{
	unsigned int largest_pg_shift =
		min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
		      BITS_PER_LONG - 1);

	/*
	 * Despite a command allowing it, the device does not support lower than
	 * 4k page size.
	 */
	pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
	return GENMASK(largest_pg_shift, pgsz_shift);
}

/*
 * For mkc users, instead of a page_offset the command has a start_iova which
 * specifies both the page_offset and the on-the-wire IOVA
 */
#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova)    \
	ib_umem_find_best_pgsz(umem,                                           \
			       __mlx5_log_page_size_to_bitmap(                 \
				       __mlx5_bit_sz(typ, log_pgsz_fld),       \
				       pgsz_shift),                            \
			       iova)

static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
			      unsigned int offset_shift)
{
	unsigned int largest_offset_shift =
		min_t(unsigned long, page_offset_bits - 1 + offset_shift,
		      BITS_PER_LONG - 1);

	return GENMASK(largest_offset_shift, offset_shift);
}

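/*
 * Worked example (illustrative only, not taken from a real command layout):
 * for a hypothetical 6-bit log_pgsz field with pgsz_shift == 0 on a 64-bit
 * build, __mlx5_log_page_size_to_bitmap() computes
 *
 *	largest_pg_shift = min_t(unsigned long, (1ULL << 6) - 1 + 0, 63) = 63
 *	pgsz_shift       = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, 0) = 12
 *	return GENMASK(63, 12);
 *
 * i.e. a bitmap offering every power-of-two page size from 4K upward, which
 * ib_umem_find_best_pgsz() then intersects with what the umem layout and the
 * IOVA actually allow.
 */
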
/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * Which restricts allowed page sizes to ones that satisfy the above.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized);
#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,           \
					    pgsz_shift, page_offset_fld,       \
					    scale, page_offset_quantized)      \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld),                           \
		GENMASK(31, order_base_2(scale)), scale,                       \
		page_offset_quantized)

#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,        \
					       pgsz_shift, page_offset_fld,    \
					       scale, page_offset_quantized)   \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
		page_offset_quantized)

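/*
 * Worked example for the quantization relation above (numbers are
 * illustrative): with page_size == 4096 and scale == 64 the offset quantum
 * is 4096 / 64 == 64 bytes, so a umem starting 640 bytes into its best page
 * is described as
 *
 *	page_offset_quantized = 640 / (4096 / 64) = 10
 *
 * Page sizes for which the byte offset is not an exact multiple of the
 * quantum are removed from pgsz_bitmap before the best one is chosen.
 */
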
enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT = 8,
	MLX5_IB_MMAP_CMD_MASK = 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE = 0x1,
	MLX5_RES_SCAT_DATA64_CQE = 0x2,
	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY = 1,
	MLX5_MAD_IFC_IGNORE_BKEY = 2,
	MLX5_MAD_IFC_NET_VIEW = 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
	MLX5_TM_MAX_SGE = 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX = BIT(31),
	MLX5_IB_INVALID_BFREG = BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN = 6,
	MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
	MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;

	u64 lib_caps;
	u16 devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u16 uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS 2
#define MLX5_IB_NUM_EGRESS_FTS 1

struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_handle *rule;
	struct ib_counters *ibcounters;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_flow_matcher *flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int mask_len;
	enum mlx5_ib_flow_type flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16 priority;
	struct mlx5_core_dev *mdev;
	atomic_t usecnt;
	u8 match_criteria_enable;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

enum mlx5_ib_optional_counter_type {
	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,

	MLX5_IB_OPCOUNTER_MAX,
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio fdb;
	struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
	struct mlx5_flow_table *lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * only single add/removal of flow steering rule could be done
	 * simultaneously.
	 */
	struct mutex lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD 16
#define MLX5_IB_UMR_XLT_ALIGNMENT 64

#define MLX5_IB_UPD_XLT_ZAP BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC BIT(2)
#define MLX5_IB_UPD_XLT_ADDR BIT(3)
#define MLX5_IB_UPD_XLT_PD BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)

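/*
 * Example flag combination (a sketch; mlx5_ib_update_xlt(), declared below,
 * is the real consumer and odp.c owns the actual call sites): an ODP
 * invalidation wants the affected XLT entries cleared without sleeping,
 * which combines as
 *
 *	mlx5_ib_update_xlt(mr, idx, npages, 0,
 *			   MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC);
 *
 * while the first population of a new mkey sets MLX5_IB_UPD_XLT_ENABLE so
 * the same UMR that writes the translation also makes the mkey usable.
 */
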
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)

struct wr_list {
	u16 opcode;
	u16 next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
	unsigned *wqe_head;
	u16 unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
	u16 cur_post;
	u16 last_poll;
	void *cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
	struct mlx5_core_qp core_qp;
	u32 rq_num_pas;
	u32 log_rq_stride;
	u32 log_rq_size;
	u32 rq_page_offset;
	u32 log_page_size;
	u32 log_num_strides;
	u32 two_byte_shift_en;
	u32 single_stride_log_num_of_bytes;
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
	u32 wqe_shift;
	int wq_sig;
	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
	u16 uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
	int buf_size;
	u64 buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
	u16 xrcdn;
	u32 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
};

struct mlx5_ib_rss_qp {
	u32 tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tirn;
	u8 state;
	u32 flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	struct mlx5_flow_handle *flow_rule;
	u32 tisn;
	u8 state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int buf_size;
	unsigned long offset;
	struct mlx5_sq_bfreg *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct mdct;
	u32 *in;
};

struct mlx5_ib_gsi_qp {
	struct ib_qp *rx_qp;
	u32 port_num;
	struct ib_qp_cap cap;
	struct ib_cq *cq;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

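/*
 * The producer/consumer pair above implements a simple ring. A sketch of the
 * indexing (gsi.c is the authoritative user; locals here are illustrative):
 *
 *	spin_lock_irqsave(&gsi->lock, flags);
 *	wr = &gsi->outstanding_wrs[gsi->outstanding_pi % gsi->cap.max_send_wr];
 *	gsi->outstanding_pi++;
 *	spin_unlock_irqrestore(&gsi->lock, flags);
 *
 * with outstanding_ci advanced from the completion handler, so
 * outstanding_pi - outstanding_ci is the number of WRs still in flight.
 */
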
struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf buf;

	struct mlx5_db db;
	struct mlx5_ib_wq rq;

	u8 sq_signal_bits;
	u8 next_fence;
	struct mlx5_ib_wq sq;

	/* serialize qp state modifications
	 */
	struct mutex mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32 flags;
	u32 port;
	u8 state;
	int max_inline_data;
	struct mlx5_bf bf;
	u8 has_rq:1;
	u8 is_rss:1;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int bfregn;

	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	struct mlx5_rate_limit rl;
	u32 underlay_qpn;
	u32 flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 */
	enum ib_qp_type type;
	/* A flag to indicate that a new counter has been configured
	 * but has not yet taken effect
	 */
	u32 counter_pending;
	u16 gsi_lag_port;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
};

struct mlx5_umr_wr {
	struct ib_send_wr wr;
	u64 virt_addr;
	u64 offset;
	struct ib_pd *pd;
	unsigned int page_shift;
	unsigned int xlt_size;
	u64 length;
	int access_flags;
	u32 mkey;
	u8 ignore_free_state:1;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

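/*
 * Sketch of how a UMR work request is built and later recovered (the MR and
 * WR code own the real call sites; field values here are illustrative only):
 *
 *	struct mlx5_umr_wr umrwr = {};
 *
 *	umrwr.wr.opcode = MLX5_IB_WR_UMR;
 *	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
 *	umrwr.pd = pd;
 *	umrwr.mkey = mr->mmkey.key;
 *
 * On the processing side the generic ib_send_wr pointer is mapped back to
 * the full request with the umr_wr() container_of() accessor above.
 */
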
enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};

struct mlx5_ib_cq {
	struct ib_cq ibcq;
	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	struct mlx5_db db;

	/* serialize access to the CQ
	 */
	spinlock_t lock;

	/* protect resize cq
	 */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;
	int cqe_size;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	u32 create_flags;
	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
	u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	/* protect SRQ handling
	 */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming a SRQ
	 */
	struct mutex mutex;
	int wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

enum mlx5_mkey_type {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_ib_mkey {
	u32 key;
	enum mlx5_mkey_type type;
	unsigned int ndescs;
	struct wait_queue_head wait;
	refcount_t usecount;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
					 IB_ACCESS_REMOTE_WRITE |\
					 IB_ACCESS_REMOTE_READ |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
					  IB_ACCESS_REMOTE_WRITE |\
					  IB_ACCESS_REMOTE_READ |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value) \
	atomic64_add(value, &((mr)->odp_stats.counter_name))

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	struct mlx5_ib_mkey mmkey;

	/* User MR data */
	struct mlx5_cache_ent *cache_ent;
	struct ib_umem *umem;

	/* This is zero'd when the MR is allocated */
	union {
		/* Used only while the MR is in the cache */
		struct {
			u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
			struct mlx5_async_work cb_work;
			/* Cache list element */
			struct list_head list;
		};

		/* Used only by kernel MRs (umem == NULL) */
		struct {
			void *descs;
			void *descs_alloc;
			dma_addr_t desc_map;
			int max_descs;
			int desc_size;
			int access_mode;

			/* For Kernel IB_MR_TYPE_INTEGRITY */
			struct mlx5_core_sig_ctx *sig;
			struct mlx5_ib_mr *pi_mr;
			struct mlx5_ib_mr *klm_mr;
			struct mlx5_ib_mr *mtt_mr;
			u64 data_iova;
			u64 pi_iova;
			int meta_ndescs;
			int meta_length;
			int data_length;
		};

		/* Used only by User MRs (umem != NULL) */
		struct {
			unsigned int page_shift;
			/* Current access_flags */
			int access_flags;

			/* For User ODP */
			struct mlx5_ib_mr *parent;
			struct xarray implicit_children;
			union {
				struct work_struct work;
			} odp_destroy;
			struct ib_odp_counters odp_stats;
			bool is_odp_implicit;
		};
	};
};

/* Zero the fields in the mr that are variant depending on usage */
static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
{
	memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
}

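/*
 * mlx5_clear_mr() relies on 'out' being the first member of the union above:
 * everything from that offset to the end of the struct is usage-variant, so
 * a single memset() re-initializes whichever union arm was live. A minimal
 * standalone sketch of the same pattern (hypothetical struct, illustration
 * only):
 *
 *	struct foo {
 *		int stable;
 *		int variant_a;
 *		int variant_b;
 *	};
 *
 *	memset(&f->variant_a, 0, sizeof(*f) - offsetof(struct foo, variant_a));
 *
 * keeps 'stable' intact while zeroing both variant fields.
 */
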
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_dmabuf;
}

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_ib_mkey mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

struct umr_common {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	/* control access to UMR QP
	 */
	struct semaphore sem;
};

struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
	 */
	spinlock_t lock;

	char name[4];
	u32 order;
	u32 xlt;
	u32 access_mode;
	u32 page;

	u8 disabled:1;
	u8 fill_to_high_water:1;

	/*
	 * - available_mrs is the length of list head, i.e. the number of MRs
	 *   available for immediate allocation.
	 * - total_mrs is available_mrs plus all in use MRs that could be
	 *   returned to the cache.
	 * - limit is the low water mark for available_mrs, 2 * limit is the
	 *   upper water mark.
	 * - pending is the number of MRs currently being created
	 */
	u32 total_mrs;
	u32 available_mrs;
	u32 limit;
	u32 pending;

	/* Statistics */
	u32 miss;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
	struct dentry *root;
	unsigned long last_add;
};

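/*
 * Worked example of the water marks documented in mlx5_cache_ent
 * (illustrative numbers): with limit == 16 the background work aims to keep
 * available_mrs between the low water mark (16) and the high water mark
 * (2 * limit == 32). Dropping below 16 queues asynchronous mkey creation,
 * tracked in 'pending', and fill_to_high_water keeps that refill going until
 * available_mrs reaches 32. MRs returned by users go back onto 'head' rather
 * than being destroyed, so total_mrs only shrinks when the cache itself
 * frees entries.
 */
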
struct mlx5_ib_port_resources {
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
	u32 xrcdn0;
	u32 xrcdn1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
	struct mlx5_ib_port_resources ports[2];
};

#define MAX_OPFC_RULES 2

struct mlx5_ib_op_fc {
	struct mlx5_fc *fc;
	struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
};

struct mlx5_ib_counters {
	struct rdma_stat_desc *descs;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u32 num_op_counters;
	u16 set_id;
	struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};

int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
			 struct mlx5_ib_op_fc *opfc,
			 enum mlx5_ib_optional_counter_type type);

void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_op_fc *opfc,
			     enum mlx5_ib_optional_counter_type type);

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t netdev_lock;
	struct net_device *netdev;
	struct notifier_block nb;
	atomic_t tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev *dev;
	u32 native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep *rep;
};

struct mlx5_ib_dbg_param {
	int offset;
	struct mlx5_ib_dev *dev;
	struct dentry *dentry;
	u32 port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry *root;
	struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev *dev;
	struct work_struct delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex lock;
	u32 timeout;
	bool activate;
	atomic_t events_cnt;
	atomic_t rqs_cnt;
	struct dentry *dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FS,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = { .init = _init, .cleanup = _cleanup }

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

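/*
 * Sketch of how a profile is assembled with STAGE_CREATE() (the stage
 * callback names here are hypothetical; main.c defines the real profiles):
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     stage_init_init, stage_init_cleanup),
 *		STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 *			     stage_caps_init, NULL),
 *	};
 *
 * __mlx5_ib_add() walks the array in enum order calling each ->init, and
 * __mlx5_ib_remove() runs the ->cleanup callbacks in reverse; a NULL
 * callback is simply skipped.
 */
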
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action ib_action;
	union {
		struct {
			u64 ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

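/*
 * Sketch of the read path implied by the callback above (buffer and flags
 * are illustrative; the counters code owns the real call sites):
 *
 *	struct mlx5_read_counters_attr read_attr = {
 *		.hw_cntrs_hndl = mcounters->hw_cntrs_hndl,
 *		.out = out_buf,
 *		.flags = flags,
 *	};
 *
 *	ret = mcounters->read_counters(ibdev, &read_attr);
 *
 * so each counters type only has to supply a read_counters() implementation
 * plus the metadata fields describing how many entries 'out' will carry.
 */
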
int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);

struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex mutex;
	u32 user_td;
	int qps;
	bool enabled;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};

struct mlx5_port_caps {
	bool has_smi;
	u8 ext_port_cap;
};

struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	int num_ports;
	/* serialize update of capability mask
	 */
	struct mutex cap_mask_mutex;
	u8 ib_active:1;
	u8 is_rep:1;
	u8 lag_active:1;
	u8 wc_support:1;
	u8 fill_delay;
	struct umr_common umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources devr;

	atomic_t mkey_var;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
	struct ib_odp_caps odp_caps;
	u64 odp_max_size;
	struct mutex odp_eq_mutex;
	struct mlx5_ib_pf_eq odp_pf_eq;

	struct xarray odp_mkeys;

	u32 null_mkey;
	struct mlx5_ib_flow_db *flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
	struct mlx5_sq_bfreg wc_bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_ib_delay_drop delay_drop;
	const struct mlx5_ib_profile *profile;

	struct mlx5_ib_lb_state lb;
	u8 umr_fence;
	struct list_head ib_dev_list;
	u64 sys_image_guid;
	struct mlx5_dm dm;
	u16 devx_whitelist_uid;
	struct mlx5_srq_table srq_table;
	struct mlx5_qp_table qp_table;
	struct mlx5_async_ctx async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	u16 pkey_table_len;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
	return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 virt_addr, int access_flags,
				    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry, int access_flags);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_pf_eq *eq)
{
	return 0;
}
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
int __mlx5_ib_add(struct mlx5_ib_dev *dev,
		  const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u32 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u32 ib_port_num,
						   u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u32 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

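/*
 * The uidx checks above collapse to a small case table (illustrative
 * summary of get_qp_user_index()/get_srq_user_index(); the uidx field is
 * 24 bits wide, matching MLX5_IB_DEFAULT_UIDX == 0xffffff):
 *
 *	cqe_version  ucmd has uidx  ucmd->uidx            result
 *	0            no             -                     0, uidx = default
 *	0            yes            MLX5_IB_DEFAULT_UIDX  0 (uidx untouched)
 *	0            yes            anything else         -EINVAL
 *	1            no             -                     -EINVAL
 *	1            yes            default or > 24 bits  -EINVAL
 *	1            yes            valid 24-bit value    0, uidx = value
 */
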
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
		MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}

extern void *xlt_emergency_page;

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
						 size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey
	 * can never be enabled without this capability. Simplify this weird
	 * quirky hardware by just saying it can't use PAS lists with UMR at
	 * all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;
	return true;
}

/*
 * true if an existing MR can be reconfigured to new access_flags using UMR.
 * Older HW cannot use UMR to update certain elements of the MKC. See
 * umr_check_mkey_mask() and get_umr_update_access_mask().
 */
static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
						 unsigned int current_access_flags,
						 unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}

static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mkey *mmkey)
{
	refcount_set(&mmkey->usecount, 1);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
			       mmkey, GFP_KERNEL));
}

/* deref an mkey that can participate in ODP flow */
static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	if (refcount_dec_and_test(&mmkey->usecount))
		wake_up(&mmkey->wait);
}

/* deref an mkey that can participate in ODP flow and wait for release */
static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	mlx5r_deref_odp_mkey(mmkey);
	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}

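/*
 * Lifetime sketch for the three helpers above (illustrative; the ODP and
 * DEVX code own the real call sites): an mkey reachable from the page fault
 * path is published with a usecount of 1, lookups take extra references,
 * and teardown waits for all of them to drain:
 *
 *	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);	// publish, count = 1
 *	...
 *	mlx5r_deref_odp_mkey(&mr->mmkey);		// lookup path put
 *	...
 *	mlx5r_deref_wait_odp_mkey(&mr->mmkey);		// drop the initial ref
 *							// and wait for zero
 */
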
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}

static inline bool rt_supported(int ts_cap)
{
	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}
#endif /* MLX5_IB_H */