/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)
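
/*
 * Usage sketch (illustrative arguments and output): the macros above prepend
 * function, line and pid to the format string, so a call such as
 *
 *	mlx5_ib_dbg(dev, "cqe_size %d\n", cqe_size);
 *
 * would log e.g. "create_cq:123:(pid 4567): cqe_size 64".
 */
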
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

static __always_inline unsigned long
__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
			       unsigned int pgsz_shift)
{
	unsigned int largest_pg_shift =
		min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
		      BITS_PER_LONG - 1);

	/*
	 * Despite the command allowing it, the device does not support page
	 * sizes smaller than 4k.
	 */
	pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
	return GENMASK(largest_pg_shift, pgsz_shift);
}
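
/*
 * Worked example (illustrative values, assuming a 64-bit BITS_PER_LONG): with
 * log_pgsz_bits = 5 and pgsz_shift = 12, largest_pg_shift =
 * min((1 << 5) - 1 + 12, 63) = 43, and pgsz_shift stays at 12 since it is
 * already >= MLX5_ADAPTER_PAGE_SHIFT, so the result is GENMASK(43, 12):
 * every power-of-two page size from 4K up to 8T is reported as usable.
 */
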
/*
 * For mkc users, instead of a page_offset the command has a start_iova which
 * specifies both the page_offset and the on-the-wire IOVA
 */
#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova)    \
	ib_umem_find_best_pgsz(umem,                                           \
			       __mlx5_log_page_size_to_bitmap(                 \
				       __mlx5_bit_sz(typ, log_pgsz_fld),       \
				       pgsz_shift),                            \
			       iova)

static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
			      unsigned int offset_shift)
{
	unsigned int largest_offset_shift =
		min_t(unsigned long, page_offset_bits - 1 + offset_shift,
		      BITS_PER_LONG - 1);

	return GENMASK(largest_offset_shift, offset_shift);
}

/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * Which restricts allowed page sizes to ones that satisfy the above.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized);
#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,           \
					    pgsz_shift, page_offset_fld,       \
					    scale, page_offset_quantized)      \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld),                           \
		GENMASK(31, order_base_2(scale)), scale,                       \
		page_offset_quantized)

#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,        \
					       pgsz_shift, page_offset_fld,    \
					       scale, page_offset_quantized)   \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
		page_offset_quantized)
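
/*
 * Worked example (illustrative numbers): with scale = 64 and a chosen
 * page_size of 4096, the offset quantum is page_size / scale = 64 bytes, so
 * a umem that begins 320 bytes into the page is described by
 * page_offset_quantized = 5. Page sizes for which the start offset is not a
 * whole multiple of the quantum cannot be represented and are excluded from
 * the search.
 */
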
enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT = 8,
	MLX5_IB_MMAP_CMD_MASK = 0xff,
};
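
/*
 * Sketch of how these constants are meant to be combined (not a declaration
 * from this header): the mmap command is carried in the bits above the page
 * offset, so a decoder would extract it as
 *
 *	cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 */
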
enum {
	MLX5_RES_SCAT_DATA32_CQE = 0x1,
	MLX5_RES_SCAT_DATA64_CQE = 0x2,
	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY = 1,
	MLX5_MAD_IFC_IGNORE_BKEY = 2,
	MLX5_MAD_IFC_NET_VIEW = 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
	MLX5_TM_MAX_SGE = 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX = BIT(31),
	MLX5_IB_INVALID_BFREG = BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN = 6,
	MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
	MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;

	u64 lib_caps;
	u16 devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u16 uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
#define MLX5_IB_NUM_FDB_FTS		MLX5_BY_PASS_NUM_REGULAR_PRIOS
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int			mask_len;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16			priority;
	struct mlx5_core_dev	*mdev;
	atomic_t		usecnt;
	u8			match_criteria_enable;
};

struct mlx5_ib_steering_anchor {
	struct mlx5_ib_flow_prio *ft_prio;
	struct mlx5_ib_dev *dev;
	atomic_t usecnt;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

enum mlx5_ib_optional_counter_type {
	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,

	MLX5_IB_OPCOUNTER_MAX,
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb[MLX5_IB_NUM_FDB_FTS];
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	opfcs[MLX5_IB_OPCOUNTER_MAX];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when adding/deleting flow rules.
	 * Only a single add/removal of a flow steering rule can be
	 * done at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST	(IB_QP_CREATE_RESERVED_START << 1)

struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void			*cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	u32			log_num_strides;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
	u16			uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u32			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg	*bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
	u32			*in;
};

struct mlx5_ib_gsi_qp {
	struct ib_qp *rx_qp;
	u32 port_num;
	struct ib_qp_cap cap;
	struct ib_cq *cq;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32			flags;
	u32			port;
	u8			state;
	int			max_inline_data;
	struct mlx5_bf		bf;
	u8			has_rq:1;
	u8			is_rss:1;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	u32			underlay_qpn;
	u32			flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 */
	enum ib_qp_type		type;
	/* A flag to indicate that a new counter has been configured
	 * but has not yet taken effect
	 */
	u32			counter_pending;
	u16			gsi_lag_port;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

enum mlx5_mkey_type {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_ib_mkey {
	u32 key;
	enum mlx5_mkey_type type;
	unsigned int ndescs;
	struct wait_queue_head wait;
	refcount_t usecount;
	struct mlx5_cache_ent *cache_ent;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))
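
/*
 * Usage sketch (illustrative): counter_name must name a field of
 * struct ib_odp_counters, so a page-fault handler might account the pages it
 * resolved with
 *
 *	mlx5_update_odp_stats(mr, faults, npages);
 */
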
struct mlx5_ib_mr {
	struct ib_mr ibmr;
	struct mlx5_ib_mkey mmkey;

	struct ib_umem *umem;

	union {
		/* Used only by kernel MRs (umem == NULL) */
		struct {
			void *descs;
			void *descs_alloc;
			dma_addr_t desc_map;
			int max_descs;
			int desc_size;
			int access_mode;

			/* For Kernel IB_MR_TYPE_INTEGRITY */
			struct mlx5_core_sig_ctx *sig;
			struct mlx5_ib_mr *pi_mr;
			struct mlx5_ib_mr *klm_mr;
			struct mlx5_ib_mr *mtt_mr;
			u64 data_iova;
			u64 pi_iova;
			int meta_ndescs;
			int meta_length;
			int data_length;
		};

		/* Used only by User MRs (umem != NULL) */
		struct {
			unsigned int page_shift;
			/* Current access_flags */
			int access_flags;

			/* For User ODP */
			struct mlx5_ib_mr *parent;
			struct xarray implicit_children;
			union {
				struct work_struct work;
			} odp_destroy;
			struct ib_odp_counters odp_stats;
			bool is_odp_implicit;
		};
	};
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_dmabuf;
}

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_ib_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

enum {
	MLX5_UMR_STATE_UNINIT,
	MLX5_UMR_STATE_ACTIVE,
	MLX5_UMR_STATE_RECOVER,
	MLX5_UMR_STATE_ERR,
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* Protects from UMR QP overflow
	 */
	struct semaphore	sem;
	/* Protects from using UMR while the UMR is not active
	 */
	struct mutex lock;
	unsigned int state;
};

struct mlx5_cache_ent {
	struct xarray		mkeys;
	unsigned long		stored;
	unsigned long		reserved;

	char			name[4];
	u32			order;
	u32			access_mode;
	u32			page;
	unsigned int		ndescs;

	u8 disabled:1;
	u8 fill_to_high_water:1;

	/*
	 * limit is the low water mark for stored mkeys; 2 * limit is the
	 * upper water mark.
	 */
	u32 in_use;
	u32 limit;

	/* Statistics */
	u32			miss;

	struct mlx5_ib_dev     *dev;
	struct delayed_work	dwork;
};
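
/*
 * Worked example (illustrative): with limit = 16, the cache tries to keep at
 * least 16 mkeys stored in the entry; when fill_to_high_water is set the
 * background work refills toward the 2 * limit = 32 high water mark, and
 * anything above 32 is shrunk back over time.
 */
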
struct mlx5r_async_create_mkey {
	union {
		u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
		u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	};
	struct mlx5_async_work cb_work;
	struct mlx5_cache_ent *ent;
	u32 mkey;
};

struct mlx5_mkey_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MKEY_CACHE_ENTRIES];
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_port_resources {
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	u32 xrcdn0;
	u32 xrcdn1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
};

#define MAX_OPFC_RULES 2

struct mlx5_ib_op_fc {
	struct mlx5_fc *fc;
	struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
};

struct mlx5_ib_counters {
	struct rdma_stat_desc *descs;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u32 num_op_counters;
	u16 set_id;
	struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};

int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
			 struct mlx5_ib_op_fc *opfc,
			 enum mlx5_ib_optional_counter_type type);

void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_op_fc *opfc,
			     enum mlx5_ib_optional_counter_type type);

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
	u32			native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u32			port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct dentry		*dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FS,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
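
/*
 * Usage sketch (illustrative; the init/cleanup callbacks named here are
 * hypothetical): a profile is built as a designated-initializer array, e.g.
 *
 *	static const struct mlx5_ib_profile my_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     my_stage_init, my_stage_cleanup),
 *	};
 */
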
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);
struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
	int			qps;
	bool			enabled;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};

struct mlx5_port_caps {
	bool has_smi;
	u8 ext_port_cap;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct notifier_block		mdev_events;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	u8				ib_active:1;
	u8				is_rep:1;
	u8				lag_active:1;
	u8				wc_support:1;
	u8				fill_delay;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;

	atomic_t			mkey_var;
	struct mlx5_mkey_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	struct mutex		odp_eq_mutex;
	struct mlx5_ib_pf_eq	odp_pf_eq;

	struct xarray		odp_mkeys;

	u32			null_mkey;
	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	wc_bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;

	struct mlx5_ib_lb_state		lb;
	u8			umr_fence;
	struct list_head	ib_dev_list;
	u64			sys_image_guid;
	struct mlx5_dm		dm;
	u16			devx_whitelist_uid;
	struct mlx5_srq_table	srq_table;
	struct mlx5_qp_table	qp_table;
	struct mlx5_async_ctx	async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	u16 pkey_table_len;
	u8 lag_ports;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
	return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
		struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 virt_addr, int access_flags,
				    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       struct mlx5_cache_ent *ent,
				       int access_flags);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_pf_eq *eq)
{
	return 0;
}
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
int __mlx5_ib_add(struct mlx5_ib_dev *dev,
		  const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u32 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u32 ib_port_num,
						   u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u32 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ create flags,
	 * otherwise return zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
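
/*
 * A note on the uidx handshake above (a summary of the checks, not new
 * behaviour): when the context uses CQE version 1, userspace must pass a
 * uidx that fits in MLX5_USER_ASSIGNED_UIDX_MASK and differs from
 * MLX5_IB_DEFAULT_UIDX; with CQE version 0 the uidx field must either be
 * absent from the command or left at MLX5_IB_DEFAULT_UIDX, and the kernel
 * falls back to the default index.
 */
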
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

extern void *xlt_emergency_page;

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mkey *mmkey)
{
	refcount_set(&mmkey->usecount, 1);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
			       mmkey, GFP_KERNEL));
}

/* deref an mkey that can participate in ODP flow */
static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	if (refcount_dec_and_test(&mmkey->usecount))
		wake_up(&mmkey->wait);
}

/* deref an mkey that can participate in ODP flow and wait for release */
static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	mlx5r_deref_odp_mkey(mmkey);
	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}
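
/*
 * Lifecycle sketch (illustrative): a creator publishes the mkey with
 * mlx5r_store_odp_mkey() (usecount starts at 1), fault handlers take and
 * drop temporary references around use, and teardown pairs an xa_erase() of
 * the dev->odp_mkeys entry with mlx5r_deref_wait_odp_mkey(&mr->mmkey) to
 * block until every in-flight user has dropped its reference.
 */
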
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	/*
	 * If the driver is in hash mode and the port_select_flow_table_bypass cap
	 * is supported, it means that the driver no longer needs to assign the port
	 * affinity by default. If a user wants to set the port affinity explicitly,
	 * the user has a dedicated API to do that, so there is no need to assign
	 * the port affinity by default.
	 */
	if (dev->lag_active &&
	    mlx5_lag_mode_is_hash(dev->mdev) &&
	    MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
		return 0;

	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}

static inline bool rt_supported(int ts_cap)
{
	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}

/*
 * PCI Peer to Peer is a trainwreck. If no switch is present then things
 * sometimes work, depending on the pci_distance_p2p logic for excluding broken
 * root complexes. However if a switch is present in the path, then things get
 * really ugly depending on how the switch is setup. This table assumes that the
 * root complex is strict and is validating that all req/reps are matched
 * perfectly - so any scenario where it sees only half the transaction is a
 * failure.
 *
 * CR/RR/DT  ATS RO P2P
 * 00X       X   X  OK
 * 010       X   X  fails (request is routed to root but root never sees comp)
 * 011       0   X  fails (request is routed to root but root never sees comp)
 * 011       1   X  OK
 * 10X       X   1  OK
 * 101       X   0  fails (completion is routed to root but root didn't see req)
 * 110       X   0  SLOW
 * 111       0   0  SLOW
 * 111       1   0  fails (completion is routed to root but root didn't see req)
 * 111       1   1  OK
 *
 * Unfortunately we cannot reliably know if a switch is present or what the
 * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
 * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
 *
 * For now assume if the umem is a dma_buf then it is P2P.
 */
static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
				       struct ib_umem *umem, int access_flags)
{
	if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
		return false;
	return access_flags & IB_ACCESS_RELAXED_ORDERING;
}

#endif /* MLX5_IB_H */