/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG)
#define ibdev_dbg(__dev, format, args...) \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG)
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif
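/*
 * Example (illustrative sketch, not part of this header): code that holds an
 * ib_device pointer can use the helpers above instead of plain dev_err()/
 * pr_debug() so that messages carry the IB device name, and can rate-limit
 * noisy paths.  The "foo" names below are hypothetical.
 *
 *	static void foo_report_cq_error(struct ib_device *ibdev, u32 cqn)
 *	{
 *		ibdev_err(ibdev, "CQ %u: completion error\n", cqn);
 *		ibdev_dbg_ratelimited(ibdev, "CQ %u: dumping state\n", cqn);
 *	}
 */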
union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	struct net_device __rcu *ndev;
	struct ib_device *device;
	union ib_gid gid;
	enum ib_gid_type gid_type;
	u16 index;
	u8 port_num;
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	/* Not in use, former INIT_TYPE = (1 << 9),*/
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey field in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	/* Reserved, old SEND_W_INV = (1 << 16),*/
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
	IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int cqe;
	u32 comp_vector;
	u32 flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64 length;
	u64 offset;
	u32 access_flags;
};

struct ib_dm_alloc_attr {
	u64 length;
	u32 alignment;
	u32 flags;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	u64 device_cap_flags;
	int max_send_sge;
	int max_recv_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	unsigned int max_pi_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock; /* in KHZ */
	struct ib_rss_caps rss_caps;
	u32 max_wq_type_rq;
	u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps tm_caps;
	struct ib_cq_caps cq_caps;
	u64 max_dm_size;
	/* Max entries for sgl for optimized performance per READ */
	u32 max_sgl_rd;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
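/*
 * Example (illustrative only, not part of this header): a RoCE provider can
 * derive an IB MTU enum from its Ethernet netdev MTU with the helper above;
 * for instance a 1500-byte netdev MTU maps to IB_MTU_1024.  The "foo" name
 * is hypothetical.
 *
 *	static enum ib_mtu foo_active_mtu(struct net_device *netdev)
 *	{
 *		return ib_mtu_int_to_enum(netdev->mtu);
 *	}
 */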
enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_2X = 16,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_2X:  return 2;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32,
	IB_SPEED_HDR = 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64 bits and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in the
 *   sysfs directory.
 * @num_counters - How many hardware counters there are.  If the names
 *   array is shorter than this number, a kernel oops will result.  Driver
 *   authors are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex lock; /* Protect lifespan and values[] */
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
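/*
 * Example (illustrative sketch, not part of this header): a driver with a
 * fixed set of hardware counters might allocate its rdma_hw_stats as below,
 * following the BUILD_BUG_ON() advice in the kdoc above.  The "foo" names
 * are hypothetical.
 *
 *	static const char * const foo_counter_names[] = {
 *		"rx_packets", "tx_packets", "rx_drops",
 *	};
 *
 *	static struct rdma_hw_stats *foo_alloc_hw_stats(void)
 *	{
 *		BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) != 3);
 *		return rdma_alloc_hw_stats_struct(foo_counter_names,
 *						  ARRAY_SIZE(foo_counter_names),
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */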
/* Define bits for the various functionality this port needs to be
 * supported by the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED	0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_IB_SA \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_AF_IB \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_AF_IB \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64 subnet_prefix;
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	unsigned int ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u32 sm_lid;
	u32 lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
	u16 port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3),
	IB_PORT_OPA_MASK_CHG = (1<<4)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		struct ib_wq *wq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)	\
	do {						\
		(_ptr)->device  = _device;		\
		(_ptr)->handler = _handler;		\
		INIT_LIST_HEAD(&(_ptr)->list);		\
	} while (0)

struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the IPv4 header
		 * is located in the last 20 bytes of the GRH-sized header.
		 */
		u8 reserved[20];
		struct iphdr roce4grh;
	};
};

#define IB_QPN_MASK	0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS  = 19,
	IB_RATE_50_GBPS  = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
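/*
 * Example (illustrative worked values): the two helpers above and
 * mult_to_ib_rate() declared below round-trip between the rate enum and
 * link-rate numbers, e.g.:
 *
 *	ib_rate_to_mult(IB_RATE_5_GBPS) == 2
 *	mult_to_ib_rate(2) == IB_RATE_5_GBPS
 *	ib_rate_to_mbps(IB_RATE_2_5_GBPS) == 2500
 */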
/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:    memory region that is used for
 *                         normal registration
 * @IB_MR_TYPE_SG_GAPS:    memory region that is capable of registering
 *                         arbitrary sg lists (without
 *                         the normal mr constraints - see
 *                         ib_map_mr_sg)
 * @IB_MR_TYPE_DM:         memory region that is used for device
 *                         memory registration
 * @IB_MR_TYPE_USER:       memory region that is used for the user-space
 *                         application
 * @IB_MR_TYPE_DMA:        memory region that is used for DMA operations
 *                         without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:  memory region that is used for
 *                         data integrity operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status.  For each
 *     failed check a corresponding status bit is set.
 * @sig_err:     Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16 dlid;
	u8 src_path_bits;
};

struct roce_ah_attr {
	u8 dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32 dlid;
	u8 src_path_bits;
	bool make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route grh;
	u8 sl;
	u8 static_rate;
	u8 port_num;
	u8 ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
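/*
 * Example (illustrative): per the comment above, a completion consumer can
 * classify receive completions without enumerating every receive opcode.
 * The helper name is hypothetical.
 *
 *	static bool foo_wc_is_recv(const struct ib_wc *wc)
 *	{
 *		return wc->opcode & IB_WC_RECV;
 *	}
 */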
enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	u32 slid;
	int wc_flags;
	u16 pkey_index;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num;	/* valid only for DR SMPs on switches */
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32 max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = 0xFF,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
	/* FREE					= 1 << 7, */
	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	/* Consumer's event_handler callback must not block */
	void (*event_handler)(struct ib_event *, void *);

	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;	/* XRC TGT QPs only */
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	u32 create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8 port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32 source_qpn;
};

struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};
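/*
 * Example (illustrative sketch): a kernel ULP typically fills ib_qp_init_attr
 * and creates an RC QP with ib_create_qp(), which is declared later in this
 * header.  The sizes are arbitrary, error handling is omitted, and the "foo"
 * name is hypothetical.
 *
 *	static struct ib_qp *foo_create_qp(struct ib_pd *pd,
 *					   struct ib_cq *send_cq,
 *					   struct ib_cq *recv_cq)
 *	{
 *		struct ib_qp_init_attr attr = {
 *			.send_cq	= send_cq,
 *			.recv_cq	= recv_cq,
 *			.cap		= {
 *				.max_send_wr	= 128,
 *				.max_recv_wr	= 128,
 *				.max_send_sge	= 1,
 *				.max_recv_sge	= 1,
 *			},
 *			.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *			.qp_type	= IB_QPT_RC,
 *		};
 *
 *		return ib_create_qp(pd, &attr);
 *	}
 */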
enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct rdma_ah_attr ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
	u32 rate_limit;
};

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u32 rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u64 compare_add;
	u64 swap;
	u64 compare_add_mask;
	u64 swap_mask;
	u32 rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr wr;
	struct ib_ah *ah;
	void *header;
	int hlen;
	int mss;
	u32 remote_qpn;
	u32 remote_qkey;
	u16 pkey_index;	/* valid for GSI only */
	u8 port_num;	/* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr wr;
	struct ib_mr *mr;
	u32 key;
	int access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,

	IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
};
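/*
 * Example (illustrative sketch): posting a signalled RDMA WRITE using the
 * structures above.  ib_post_send() is declared later in this header; the
 * local buffer is assumed to be DMA-mapped already and, per the
 * IB_DEVICE_LOCAL_DMA_LKEY note earlier, pd->local_dma_lkey is used as the
 * lkey.  The "foo" name is hypothetical.
 *
 *	static int foo_post_rdma_write(struct ib_qp *qp, struct ib_pd *pd,
 *				       u64 local_dma_addr, u32 len,
 *				       u64 remote_addr, u32 rkey)
 *	{
 *		struct ib_sge sge = {
 *			.addr	= local_dma_addr,
 *			.length	= len,
 *			.lkey	= pd->local_dma_lkey,
 *		};
 *		struct ib_rdma_wr wr = {
 *			.wr = {
 *				.opcode		= IB_WR_RDMA_WRITE,
 *				.send_flags	= IB_SEND_SIGNALED,
 *				.sg_list	= &sge,
 *				.num_sge	= 1,
 *			},
 *			.remote_addr	= remote_addr,
 *			.rkey		= rkey,
 *		};
 *		const struct ib_send_wr *bad_wr;
 *
 *		return ib_post_send(qp, &wr.wr, &bad_wr);
 *	}
 */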
/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup *cg;	/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device *device;
	struct ib_uverbs_file *ufile;
	/*
	 * 'closing' can be read by the driver only during a destroy callback;
	 * it is set when we are closing the file descriptor and indicates
	 * that mm_sem may be locked.
	 */
	bool closing;

	bool cleanup_retryable;

	struct ib_rdmacg_object cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

struct ib_uobject {
	u64 user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext *context;	/* associated user context */
	void *object;			/* containing object */
	struct list_head list;		/* link to context's list */
	struct ib_rdmacg_object cg_obj;	/* rdmacg object */
	int id;				/* index into kernel idr */
	struct kref ref;
	atomic_t usecnt;		/* protects exclusive access */
	struct rcu_head rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	u32 local_dma_lkey;
	u32 flags;
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;	/* count all resources */

	u32 unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt;	/* count all exposed resources */
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,			/* caller context, no hw completions */
	IB_POLL_SOFTIRQ,		/* poll from softirq context */
	IB_POLL_WORKQUEUE,		/* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE,	/* poll from unbound workqueue */
};
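/*
 * Example (illustrative sketch): kernel ULPs normally pick one of the poll
 * contexts above when allocating a CQ with the ib_alloc_cq() helper provided
 * further down in this header, e.g. softirq polling for latency-sensitive
 * completions.  The "foo_ctx" pointer is hypothetical and error handling is
 * minimal.
 *
 *	struct ib_cq *cq = ib_alloc_cq(ibdev, foo_ctx, 256, 0,
 *				       IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */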
struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt;	/* count number of work queues */
	enum ib_poll_context poll_ctx;
	struct ib_wc *wc;
	union {
		struct irq_poll iop;
		struct work_struct work;
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32 srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	/* Stripping the cvlan from an incoming packet and reporting it in the
	 * matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	void *wq_context;
	void (*event_handler)(struct ib_event *, void *);
	struct ib_pd *pd;
	struct ib_cq *cq;
	u32 wq_num;
	enum ib_wq_state state;
	enum ib_wq_type wq_type;
	atomic_t usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

struct ib_wq_init_attr {
	void *wq_context;
	enum ib_wq_type wq_type;
	u32 max_wr;
	u32 max_sge;
	struct ib_cq *cq;
	void (*event_handler)(struct ib_event *, void *);
	u32 create_flags;	/* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE	= 1 << 0,
	IB_WQ_CUR_STATE	= 1 << 1,
	IB_WQ_FLAGS	= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state wq_state;
	enum ib_wq_state curr_wq_state;
	u32 flags;		/* Use enum ib_wq_flags */
	u32 flags_mask;		/* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;
	u32 ind_tbl_num;
	u32 log_ind_tbl_size;
	struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32 log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state state;
	u16 pkey_index;
	u8 port_num;
	struct list_head qp_list;
	struct list_head to_error_list;
	struct ib_qp_security *sec;
};
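/*
 * Example (illustrative sketch): RSS consumers build receive work queues and
 * group them in an indirection table using the ib_wq / ib_rwq_ind_table
 * definitions above.  ib_create_wq() and ib_create_rwq_ind_table() are
 * declared further down in this header; error handling is omitted and the
 * log size (2, i.e. 4 WQs) is arbitrary.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type	= IB_WQT_RQ,
 *		.max_wr		= 256,
 *		.max_sge	= 1,
 *		.cq		= recv_cq,
 *	};
 *	struct ib_wq *wqs[4];
 *	struct ib_rwq_ind_table_init_attr ind_attr = {
 *		.log_ind_tbl_size = 2,
 *		.ind_tbl	  = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		wqs[i] = ib_create_wq(pd, &wq_attr);
 *	ind_tbl = ib_create_rwq_ind_table(pd->device, &ind_attr);
 */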

struct ib_ports_pkeys {
	struct ib_port_pkey main;
	struct ib_port_pkey alt;
};

struct ib_qp_security {
	struct ib_qp *qp;
	struct ib_device *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex mutex;
	struct ib_ports_pkeys *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head shared_qp_list;
	void *security;
	bool destroying;
	atomic_t error_list_count;
	struct completion error_complete;
	int error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;	/* XRC TGT QPs only */
	struct list_head xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t usecnt;
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32 qp_num;
	u32 max_write_sge;
	u32 max_read_sge;
	enum ib_qp_type qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security *qp_sec;
	u8 port;

	bool integrity_en;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;

	/* The counter the qp is bound to */
	struct rdma_counter *counter;
};

struct ib_dm {
	struct ib_device *device;
	u32 length;
	u32 flags;
	struct ib_uobject *uobject;
	atomic_t usecnt;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	u32 lkey;
	u32 rkey;
	u64 iova;
	u64 length;
	unsigned int page_size;
	enum ib_mr_type type;
	bool need_inval;
	union {
		struct ib_uobject *uobject;	/* user */
		struct list_head qp_entry;	/* FR */
	};

	struct ib_dm *dm;
	struct ib_sig_attrs *sig_attrs;	/* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
	enum ib_mw_type type;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};
/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP		= 0x34,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT	= 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3  /* Must be last */
};

struct ib_flow_eth_filter {
	u8 dst_mac[6];
	u8 src_mac[6];
	__be16 ether_type;
	__be16 vlan_tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_eth {
	u32 type;
	u16 size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8 sl;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ib {
	u32 type;
	u16 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
				    last have this flag set */
};

struct ib_flow_ipv4_filter {
	__be32 src_ip;
	__be32 dst_ip;
	u8 proto;
	u8 tos;
	u8 ttl;
	u8 flags;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32 type;
	u16 size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8 src_ip[16];
	u8 dst_ip[16];
	__be32 flow_label;
	u8 next_hdr;
	u8 traffic_class;
	u8 hop_limit;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32 type;
	u16 size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16 dst_port;
	__be16 src_port;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32 type;
	u16 size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32 tunnel_id;
	u8 real_sz[0];
};

/* ib_flow_spec_tunnel describes the Vxlan tunnel
 * the tunnel_id from val has the vni value
 */
struct ib_flow_spec_tunnel {
	u32 type;
	u16 size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	__be32 spi;
	__be32 seq;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_esp {
	u32 type;
	u16 size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;
	__be16 protocol;
	__be32 key;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_gre {
	u32 type;
	u16 size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	__be32 tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_mpls {
	u32 type;
	u16 size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type type;
	u16 size;
	u32 tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type type;
	u16 size;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_flow_action *act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_counters *counters;
};

union ib_flow_spec {
	struct {
		u32 type;
		u16 size;
	};
	struct ib_flow_spec_eth eth;
	struct ib_flow_spec_ib ib;
	struct ib_flow_spec_ipv4 ipv4;
	struct ib_flow_spec_tcp_udp tcp_udp;
	struct ib_flow_spec_ipv6 ipv6;
	struct ib_flow_spec_tunnel tunnel;
	struct ib_flow_spec_esp esp;
	struct ib_flow_spec_gre gre;
	struct ib_flow_spec_mpls mpls;
	struct ib_flow_spec_action_tag flow_tag;
	struct ib_flow_spec_action_drop drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16 size;
	u16 priority;
	u32 flags;
	u8 num_of_specs;
	u8 port;
	union ib_flow_spec flows[];
};

struct ib_flow {
	struct ib_qp *qp;
	struct ib_device *device;
	struct ib_uobject *uobject;
};
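/*
 * Example (illustrative sketch): a spec-less sniffer steering rule built from
 * the structures above can be attached to a QP with ib_create_flow(), which
 * is declared further down in this header.  Rules that carry specs must be
 * followed in memory by num_of_specs entries of union ib_flow_spec; error
 * handling is omitted and the port number is arbitrary.
 *
 *	struct ib_flow_attr attr = {
 *		.type		= IB_FLOW_ATTR_SNIFFER,
 *		.size		= sizeof(attr),
 *		.num_of_specs	= 0,
 *		.port		= 1,
 *	};
 *	struct ib_flow *flow;
 *
 *	flow = ib_create_flow(qp, &attr, IB_FLOW_DOMAIN_USER);
 */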
2091 */ 2092 2093 /* Kernel flags */ 2094 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, 2095 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, 2096 }; 2097 2098 struct ib_flow_spec_list { 2099 struct ib_flow_spec_list *next; 2100 union ib_flow_spec spec; 2101 }; 2102 2103 struct ib_flow_action_attrs_esp { 2104 struct ib_flow_action_attrs_esp_keymats *keymat; 2105 struct ib_flow_action_attrs_esp_replays *replay; 2106 struct ib_flow_spec_list *encap; 2107 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. 2108 * Value of 0 is a valid value. 2109 */ 2110 u32 esn; 2111 u32 spi; 2112 u32 seq; 2113 u32 tfc_pad; 2114 /* Use enum ib_flow_action_attrs_esp_flags */ 2115 u64 flags; 2116 u64 hard_limit_pkts; 2117 }; 2118 2119 struct ib_flow_action { 2120 struct ib_device *device; 2121 struct ib_uobject *uobject; 2122 enum ib_flow_action_type type; 2123 atomic_t usecnt; 2124 }; 2125 2126 struct ib_mad; 2127 struct ib_grh; 2128 2129 enum ib_process_mad_flags { 2130 IB_MAD_IGNORE_MKEY = 1, 2131 IB_MAD_IGNORE_BKEY = 2, 2132 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 2133 }; 2134 2135 enum ib_mad_result { 2136 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 2137 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 2138 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 2139 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 2140 }; 2141 2142 struct ib_port_cache { 2143 u64 subnet_prefix; 2144 struct ib_pkey_cache *pkey; 2145 struct ib_gid_table *gid; 2146 u8 lmc; 2147 enum ib_port_state port_state; 2148 }; 2149 2150 struct ib_cache { 2151 rwlock_t lock; 2152 struct ib_event_handler event_handler; 2153 }; 2154 2155 struct ib_port_immutable { 2156 int pkey_tbl_len; 2157 int gid_tbl_len; 2158 u32 core_cap_flags; 2159 u32 max_mad_size; 2160 }; 2161 2162 struct ib_port_data { 2163 struct ib_device *ib_dev; 2164 2165 struct ib_port_immutable immutable; 2166 2167 spinlock_t pkey_list_lock; 2168 struct list_head pkey_list; 2169 2170 struct ib_port_cache cache; 2171 2172 spinlock_t netdev_lock; 2173 struct net_device __rcu *netdev; 2174 struct hlist_node ndev_hash_link; 2175 struct rdma_port_counter port_counter; 2176 struct rdma_hw_stats *hw_stats; 2177 }; 2178 2179 /* rdma netdev type - specifies protocol type */ 2180 enum rdma_netdev_t { 2181 RDMA_NETDEV_OPA_VNIC, 2182 RDMA_NETDEV_IPOIB, 2183 }; 2184 2185 /** 2186 * struct rdma_netdev - rdma netdev 2187 * For cases where netstack interfacing is required. 2188 */ 2189 struct rdma_netdev { 2190 void *clnt_priv; 2191 struct ib_device *hca; 2192 u8 port_num; 2193 2194 /* 2195 * cleanup function must be specified. 2196 * FIXME: This is only used for OPA_VNIC and that usage should be 2197 * removed too. 
2198 */ 2199 void (*free_rdma_netdev)(struct net_device *netdev); 2200 2201 /* control functions */ 2202 void (*set_id)(struct net_device *netdev, int id); 2203 /* send packet */ 2204 int (*send)(struct net_device *dev, struct sk_buff *skb, 2205 struct ib_ah *address, u32 dqpn); 2206 /* multicast */ 2207 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca, 2208 union ib_gid *gid, u16 mlid, 2209 int set_qkey, u32 qkey); 2210 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca, 2211 union ib_gid *gid, u16 mlid); 2212 }; 2213 2214 struct rdma_netdev_alloc_params { 2215 size_t sizeof_priv; 2216 unsigned int txqs; 2217 unsigned int rxqs; 2218 void *param; 2219 2220 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num, 2221 struct net_device *netdev, void *param); 2222 }; 2223 2224 struct ib_odp_counters { 2225 atomic64_t faults; 2226 atomic64_t invalidations; 2227 }; 2228 2229 struct ib_counters { 2230 struct ib_device *device; 2231 struct ib_uobject *uobject; 2232 /* num of objects attached */ 2233 atomic_t usecnt; 2234 }; 2235 2236 struct ib_counters_read_attr { 2237 u64 *counters_buff; 2238 u32 ncounters; 2239 u32 flags; /* use enum ib_read_counters_flags */ 2240 }; 2241 2242 struct uverbs_attr_bundle; 2243 struct iw_cm_id; 2244 struct iw_cm_conn_param; 2245 2246 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \ 2247 .size_##ib_struct = \ 2248 (sizeof(struct drv_struct) + \ 2249 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \ 2250 BUILD_BUG_ON_ZERO( \ 2251 !__same_type(((struct drv_struct *)NULL)->member, \ 2252 struct ib_struct))) 2253 2254 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ 2255 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp)) 2256 2257 #define rdma_zalloc_drv_obj(ib_dev, ib_type) \ 2258 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL) 2259 2260 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct 2261 2262 struct rdma_user_mmap_entry { 2263 struct kref ref; 2264 struct ib_ucontext *ucontext; 2265 unsigned long start_pgoff; 2266 size_t npages; 2267 bool driver_removed; 2268 }; 2269 2270 /* Return the offset (in bytes) the user should pass to libc's mmap() */ 2271 static inline u64 2272 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry) 2273 { 2274 return (u64)entry->start_pgoff << PAGE_SHIFT; 2275 } 2276 2277 /** 2278 * struct ib_device_ops - InfiniBand device operations 2279 * This structure defines all the InfiniBand device operations, providers will 2280 * need to define the supported operations, otherwise they will be set to null. 
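 *
 * For example, a provider typically fills a static ops table and installs it
 * with ib_set_device_ops() before calling ib_register_device(). This is only
 * an illustrative sketch; the my_* symbols and the my_pd/ibpd layout are
 * hypothetical and not part of this header:
 *
 *	static const struct ib_device_ops my_dev_ops = {
 *		.owner = THIS_MODULE,
 *
 *		.query_device = my_query_device,
 *		.query_port = my_query_port,
 *		.get_port_immutable = my_get_port_immutable,
 *		.alloc_pd = my_alloc_pd,
 *		.dealloc_pd = my_dealloc_pd,
 *
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
 *	};
 *
 *	ib_set_device_ops(ibdev, &my_dev_ops);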
 */
struct ib_device_ops {
	struct module *owner;
	enum rdma_driver_id driver_id;
	u32 uverbs_abi_ver;
	unsigned int uverbs_no_driver_id_binding:1;

	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
			 const struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
	int (*post_srq_recv)(struct ib_srq *srq,
			     const struct ib_recv_wr *recv_wr,
			     const struct ib_recv_wr **bad_recv_wr);
	int (*process_mad)(struct ib_device *device, int process_mad_flags,
			   u8 port_num, const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
			   size_t *out_mad_size, u16 *out_mad_pkey_index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*modify_device)(struct ib_device *device, int device_modify_mask,
			     struct ib_device_modify *device_modify);
	void (*get_dev_fw_str)(struct ib_device *device, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);
	int (*query_port)(struct ib_device *device, u8 port_num,
			  struct ib_port_attr *port_attr);
	int (*modify_port)(struct ib_device *device, u8 port_num,
			   int port_modify_mask,
			   struct ib_port_modify *port_modify);
	/**
	 * The following mandatory functions are used only at device
	 * registration. Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *device, u8 port_num,
				  struct ib_port_immutable *immutable);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/**
	 * When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
	/**
	 * rdma netdev operation
	 *
	 * Drivers implementing alloc_rdma_netdev or rdma_netdev_get_params
	 * must return -EOPNOTSUPP if they don't support the specified type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
		struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
		const char *name, unsigned char name_assign_type,
		void (*setup)(struct net_device *));

	int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params);
	/**
	 * query_gid should return the GID value for @device when the
	 * @port_num link layer is either IB or iWARP. It is a no-op if the
	 * @port_num port is RoCE link layer.
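	 *
	 * A minimal provider implementation might look like this (sketch
	 * only; to_my_dev() and my_read_gid() are hypothetical driver
	 * helpers):
	 *
	 *	static int my_query_gid(struct ib_device *ibdev, u8 port_num,
	 *				int index, union ib_gid *gid)
	 *	{
	 *		return my_read_gid(to_my_dev(ibdev), port_num, index,
	 *				   gid);
	 *	}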
	 */
	int (*query_gid)(struct ib_device *device, u8 port_num, int index,
			 union ib_gid *gid);
	/**
	 * When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor driver may
	 * allocate memory to contain this information and store it in @context
	 * when a new GID entry is written. Params are consistent until the
	 * next call of add_gid or delete_gid. The function should return 0 on
	 * success or error otherwise. The function could be called
	 * concurrently for different ports. This function is only called when
	 * roce_gid_table is used.
	 */
	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
	/**
	 * When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
			  u16 *pkey);
	int (*alloc_ucontext)(struct ib_ucontext *context,
			      struct ib_udata *udata);
	void (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
	/**
	 * This will be called once refcount of an entry in mmap_xa reaches
	 * zero. The type of the memory that was mapped may differ between
	 * entries and is opaque to the rdma_user_mmap interface.
	 * It therefore needs to be implemented by the driver in mmap_free.
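	 *
	 * A common pattern (sketch only; struct my_mmap_entry, ment and the
	 * response field are hypothetical) is to embed the entry in a driver
	 * structure, publish the mmap offset to user space, and free the
	 * container here:
	 *
	 *	struct my_mmap_entry {
	 *		struct rdma_user_mmap_entry rdma_entry;
	 *		void *cpu_addr;
	 *	};
	 *
	 *	ret = rdma_user_mmap_entry_insert(ucontext, &ment->rdma_entry,
	 *					  size);
	 *	resp.mmap_offset = rdma_user_mmap_get_offset(&ment->rdma_entry);
	 *
	 *	static void my_mmap_free(struct rdma_user_mmap_entry *entry)
	 *	{
	 *		kfree(container_of(entry, struct my_mmap_entry,
	 *				   rdma_entry));
	 *	}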
2393 */ 2394 void (*mmap_free)(struct rdma_user_mmap_entry *entry); 2395 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2396 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); 2397 void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); 2398 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, 2399 u32 flags, struct ib_udata *udata); 2400 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2401 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2402 void (*destroy_ah)(struct ib_ah *ah, u32 flags); 2403 int (*create_srq)(struct ib_srq *srq, 2404 struct ib_srq_init_attr *srq_init_attr, 2405 struct ib_udata *udata); 2406 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr, 2407 enum ib_srq_attr_mask srq_attr_mask, 2408 struct ib_udata *udata); 2409 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); 2410 void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); 2411 struct ib_qp *(*create_qp)(struct ib_pd *pd, 2412 struct ib_qp_init_attr *qp_init_attr, 2413 struct ib_udata *udata); 2414 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2415 int qp_attr_mask, struct ib_udata *udata); 2416 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2417 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); 2418 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); 2419 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, 2420 struct ib_udata *udata); 2421 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); 2422 void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); 2423 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); 2424 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); 2425 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, 2426 u64 virt_addr, int mr_access_flags, 2427 struct ib_udata *udata); 2428 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length, 2429 u64 virt_addr, int mr_access_flags, 2430 struct ib_pd *pd, struct ib_udata *udata); 2431 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata); 2432 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, 2433 u32 max_num_sg, struct ib_udata *udata); 2434 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd, 2435 u32 max_num_data_sg, 2436 u32 max_num_meta_sg); 2437 int (*advise_mr)(struct ib_pd *pd, 2438 enum ib_uverbs_advise_mr_advice advice, u32 flags, 2439 struct ib_sge *sg_list, u32 num_sge, 2440 struct uverbs_attr_bundle *attrs); 2441 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 2442 unsigned int *sg_offset); 2443 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2444 struct ib_mr_status *mr_status); 2445 struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type, 2446 struct ib_udata *udata); 2447 int (*dealloc_mw)(struct ib_mw *mw); 2448 struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags, 2449 struct ib_fmr_attr *fmr_attr); 2450 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len, 2451 u64 iova); 2452 int (*unmap_fmr)(struct list_head *fmr_list); 2453 int (*dealloc_fmr)(struct ib_fmr *fmr); 2454 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2455 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2456 struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device, 2457 struct ib_udata *udata); 2458 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); 2459 struct 
ib_flow *(*create_flow)(struct ib_qp *qp, 2460 struct ib_flow_attr *flow_attr, 2461 int domain, struct ib_udata *udata); 2462 int (*destroy_flow)(struct ib_flow *flow_id); 2463 struct ib_flow_action *(*create_flow_action_esp)( 2464 struct ib_device *device, 2465 const struct ib_flow_action_attrs_esp *attr, 2466 struct uverbs_attr_bundle *attrs); 2467 int (*destroy_flow_action)(struct ib_flow_action *action); 2468 int (*modify_flow_action_esp)( 2469 struct ib_flow_action *action, 2470 const struct ib_flow_action_attrs_esp *attr, 2471 struct uverbs_attr_bundle *attrs); 2472 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2473 int state); 2474 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2475 struct ifla_vf_info *ivf); 2476 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2477 struct ifla_vf_stats *stats); 2478 int (*get_vf_guid)(struct ib_device *device, int vf, u8 port, 2479 struct ifla_vf_guid *node_guid, 2480 struct ifla_vf_guid *port_guid); 2481 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2482 int type); 2483 struct ib_wq *(*create_wq)(struct ib_pd *pd, 2484 struct ib_wq_init_attr *init_attr, 2485 struct ib_udata *udata); 2486 void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); 2487 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, 2488 u32 wq_attr_mask, struct ib_udata *udata); 2489 struct ib_rwq_ind_table *(*create_rwq_ind_table)( 2490 struct ib_device *device, 2491 struct ib_rwq_ind_table_init_attr *init_attr, 2492 struct ib_udata *udata); 2493 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2494 struct ib_dm *(*alloc_dm)(struct ib_device *device, 2495 struct ib_ucontext *context, 2496 struct ib_dm_alloc_attr *attr, 2497 struct uverbs_attr_bundle *attrs); 2498 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs); 2499 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, 2500 struct ib_dm_mr_attr *attr, 2501 struct uverbs_attr_bundle *attrs); 2502 struct ib_counters *(*create_counters)( 2503 struct ib_device *device, struct uverbs_attr_bundle *attrs); 2504 int (*destroy_counters)(struct ib_counters *counters); 2505 int (*read_counters)(struct ib_counters *counters, 2506 struct ib_counters_read_attr *counters_read_attr, 2507 struct uverbs_attr_bundle *attrs); 2508 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg, 2509 int data_sg_nents, unsigned int *data_sg_offset, 2510 struct scatterlist *meta_sg, int meta_sg_nents, 2511 unsigned int *meta_sg_offset); 2512 2513 /** 2514 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 2515 * driver initialized data. The struct is kfree()'ed by the sysfs 2516 * core when the device is removed. A lifespan of -1 in the return 2517 * struct tells the core to set a default lifespan. 2518 */ 2519 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 2520 u8 port_num); 2521 /** 2522 * get_hw_stats - Fill in the counter value(s) in the stats struct. 
	 * @index - The index in the value array we wish to have updated, or
	 * num_counters if we want all stats updated
	 * Return codes -
	 * < 0 - Error, no counters updated
	 * index - Updated the single counter pointed to by index
	 * num_counters - Updated all counters (will reset the timestamp
	 * and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 * one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats, u8 port, int index);
	/*
	 * This function is called once for each port when an ib device is
	 * registered.
	 */
	int (*init_port)(struct ib_device *device, u8 port_num,
			 struct kobject *port_sysfs);
	/**
	 * Allows rdma drivers to add their own restrack attributes.
	 */
	int (*fill_res_entry)(struct sk_buff *msg,
			      struct rdma_restrack_entry *entry);

	/* Device lifecycle callbacks */
	/*
	 * Called after the device becomes registered, before clients are
	 * attached
	 */
	int (*enable_driver)(struct ib_device *dev);
	/*
	 * This is called as part of ib_dealloc_device().
	 */
	void (*dealloc_driver)(struct ib_device *dev);

	/* iWarp CM callbacks */
	void (*iw_add_ref)(struct ib_qp *qp);
	void (*iw_rem_ref)(struct ib_qp *qp);
	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
	int (*iw_connect)(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *conn_param);
	int (*iw_accept)(struct iw_cm_id *cm_id,
			 struct iw_cm_conn_param *conn_param);
	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
			 u8 pdata_len);
	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
	/**
	 * counter_bind_qp - Bind a QP to a counter.
	 * @counter - The counter to be bound. If counter->id is zero then
	 * the driver needs to allocate a new counter and set counter->id
	 */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	/**
	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
	 * counter and bind it onto the default one
	 */
	int (*counter_unbind_qp)(struct ib_qp *qp);
	/**
	 * counter_dealloc - Deallocate the hw counter
	 */
	int (*counter_dealloc)(struct rdma_counter *counter);
	/**
	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
	 * the driver initialized data.
	 */
	struct rdma_hw_stats *(*counter_alloc_stats)(
		struct rdma_counter *counter);
	/**
	 * counter_update_stats - Query the stats value of this counter
	 */
	int (*counter_update_stats)(struct rdma_counter *counter);

	/**
	 * Allows rdma drivers to add their own restrack attributes
	 * dumped via 'rdma stat' iproute2 command.
	 */
	int (*fill_stat_entry)(struct sk_buff *msg,
			       struct rdma_restrack_entry *entry);

	DECLARE_RDMA_OBJ_SIZE(ib_ah);
	DECLARE_RDMA_OBJ_SIZE(ib_cq);
	DECLARE_RDMA_OBJ_SIZE(ib_pd);
	DECLARE_RDMA_OBJ_SIZE(ib_srq);
	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
};

struct ib_core_device {
	/* device must be the first element in structure until the
	 * union of ib_core_device and device exists in ib_device.
2613 */ 2614 struct device dev; 2615 possible_net_t rdma_net; 2616 struct kobject *ports_kobj; 2617 struct list_head port_list; 2618 struct ib_device *owner; /* reach back to owner ib_device */ 2619 }; 2620 2621 struct rdma_restrack_root; 2622 struct ib_device { 2623 /* Do not access @dma_device directly from ULP nor from HW drivers. */ 2624 struct device *dma_device; 2625 struct ib_device_ops ops; 2626 char name[IB_DEVICE_NAME_MAX]; 2627 struct rcu_head rcu_head; 2628 2629 struct list_head event_handler_list; 2630 spinlock_t event_handler_lock; 2631 2632 struct rw_semaphore client_data_rwsem; 2633 struct xarray client_data; 2634 struct mutex unregistration_lock; 2635 2636 struct ib_cache cache; 2637 /** 2638 * port_data is indexed by port number 2639 */ 2640 struct ib_port_data *port_data; 2641 2642 int num_comp_vectors; 2643 2644 union { 2645 struct device dev; 2646 struct ib_core_device coredev; 2647 }; 2648 2649 /* First group for device attributes, 2650 * Second group for driver provided attributes (optional). 2651 * It is NULL terminated array. 2652 */ 2653 const struct attribute_group *groups[3]; 2654 2655 u64 uverbs_cmd_mask; 2656 u64 uverbs_ex_cmd_mask; 2657 2658 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2659 __be64 node_guid; 2660 u32 local_dma_lkey; 2661 u16 is_switch:1; 2662 /* Indicates kernel verbs support, should not be used in drivers */ 2663 u16 kverbs_provider:1; 2664 /* CQ adaptive moderation (RDMA DIM) */ 2665 u16 use_cq_dim:1; 2666 u8 node_type; 2667 u8 phys_port_cnt; 2668 struct ib_device_attr attrs; 2669 struct attribute_group *hw_stats_ag; 2670 struct rdma_hw_stats *hw_stats; 2671 2672 #ifdef CONFIG_CGROUP_RDMA 2673 struct rdmacg_device cg_device; 2674 #endif 2675 2676 u32 index; 2677 struct rdma_restrack_root *res; 2678 2679 const struct uapi_definition *driver_def; 2680 2681 /* 2682 * Positive refcount indicates that the device is currently 2683 * registered and cannot be unregistered. 2684 */ 2685 refcount_t refcount; 2686 struct completion unreg_completion; 2687 struct work_struct unregistration_work; 2688 2689 const struct rdma_link_ops *link_ops; 2690 2691 /* Protects compat_devs xarray modifications */ 2692 struct mutex compat_devs_mutex; 2693 /* Maintains compat devices for each net namespace */ 2694 struct xarray compat_devs; 2695 2696 /* Used by iWarp CM */ 2697 char iw_ifname[IFNAMSIZ]; 2698 u32 iw_driver_flags; 2699 }; 2700 2701 struct ib_client_nl_info; 2702 struct ib_client { 2703 const char *name; 2704 void (*add) (struct ib_device *); 2705 void (*remove)(struct ib_device *, void *client_data); 2706 void (*rename)(struct ib_device *dev, void *client_data); 2707 int (*get_nl_info)(struct ib_device *ibdev, void *client_data, 2708 struct ib_client_nl_info *res); 2709 int (*get_global_nl_info)(struct ib_client_nl_info *res); 2710 2711 /* Returns the net_dev belonging to this ib_client and matching the 2712 * given parameters. 2713 * @dev: An RDMA device that the net_dev use for communication. 2714 * @port: A physical port number on the RDMA device. 2715 * @pkey: P_Key that the net_dev uses if applicable. 2716 * @gid: A GID that the net_dev uses to communicate. 2717 * @addr: An IP address the net_dev is configured with. 2718 * @client_data: The device's client data set by ib_set_client_data(). 2719 * 2720 * An ib_client that implements a net_dev on top of RDMA devices 2721 * (such as IP over IB) should implement this callback, allowing the 2722 * rdma_cm module to find the right net_dev for a given request. 
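 *
 * A minimal implementation might look like this (sketch only; the
 * my_netdev_lookup() helper and its locking are hypothetical):
 *
 *	static struct net_device *my_get_net_dev_by_params(
 *		struct ib_device *dev, u8 port, u16 pkey,
 *		const union ib_gid *gid, const struct sockaddr *addr,
 *		void *client_data)
 *	{
 *		struct net_device *ndev;
 *
 *		ndev = my_netdev_lookup(client_data, port, pkey, gid, addr);
 *		if (ndev)
 *			dev_hold(ndev);
 *		return ndev;
 *	}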
2723 * 2724 * The caller is responsible for calling dev_put on the returned 2725 * netdev. */ 2726 struct net_device *(*get_net_dev_by_params)( 2727 struct ib_device *dev, 2728 u8 port, 2729 u16 pkey, 2730 const union ib_gid *gid, 2731 const struct sockaddr *addr, 2732 void *client_data); 2733 2734 refcount_t uses; 2735 struct completion uses_zero; 2736 u32 client_id; 2737 2738 /* kverbs are not required by the client */ 2739 u8 no_kverbs_req:1; 2740 }; 2741 2742 /* 2743 * IB block DMA iterator 2744 * 2745 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned 2746 * to a HW supported page size. 2747 */ 2748 struct ib_block_iter { 2749 /* internal states */ 2750 struct scatterlist *__sg; /* sg holding the current aligned block */ 2751 dma_addr_t __dma_addr; /* unaligned DMA address of this block */ 2752 unsigned int __sg_nents; /* number of SG entries */ 2753 unsigned int __sg_advance; /* number of bytes to advance in sg in next step */ 2754 unsigned int __pg_bit; /* alignment of current block */ 2755 }; 2756 2757 struct ib_device *_ib_alloc_device(size_t size); 2758 #define ib_alloc_device(drv_struct, member) \ 2759 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \ 2760 BUILD_BUG_ON_ZERO(offsetof( \ 2761 struct drv_struct, member))), \ 2762 struct drv_struct, member) 2763 2764 void ib_dealloc_device(struct ib_device *device); 2765 2766 void ib_get_device_fw_str(struct ib_device *device, char *str); 2767 2768 int ib_register_device(struct ib_device *device, const char *name); 2769 void ib_unregister_device(struct ib_device *device); 2770 void ib_unregister_driver(enum rdma_driver_id driver_id); 2771 void ib_unregister_device_and_put(struct ib_device *device); 2772 void ib_unregister_device_queued(struct ib_device *ib_dev); 2773 2774 int ib_register_client (struct ib_client *client); 2775 void ib_unregister_client(struct ib_client *client); 2776 2777 void __rdma_block_iter_start(struct ib_block_iter *biter, 2778 struct scatterlist *sglist, 2779 unsigned int nents, 2780 unsigned long pgsz); 2781 bool __rdma_block_iter_next(struct ib_block_iter *biter); 2782 2783 /** 2784 * rdma_block_iter_dma_address - get the aligned dma address of the current 2785 * block held by the block iterator. 2786 * @biter: block iterator holding the memory block 2787 */ 2788 static inline dma_addr_t 2789 rdma_block_iter_dma_address(struct ib_block_iter *biter) 2790 { 2791 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1); 2792 } 2793 2794 /** 2795 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list 2796 * @sglist: sglist to iterate over 2797 * @biter: block iterator holding the memory block 2798 * @nents: maximum number of sg entries to iterate over 2799 * @pgsz: best HW supported page size to use 2800 * 2801 * Callers may use rdma_block_iter_dma_address() to get each 2802 * blocks aligned DMA address. 2803 */ 2804 #define rdma_for_each_block(sglist, biter, nents, pgsz) \ 2805 for (__rdma_block_iter_start(biter, sglist, nents, \ 2806 pgsz); \ 2807 __rdma_block_iter_next(biter);) 2808 2809 /** 2810 * ib_get_client_data - Get IB client context 2811 * @device:Device to get context for 2812 * @client:Client to get context for 2813 * 2814 * ib_get_client_data() returns the client context data set with 2815 * ib_set_client_data(). This can only be called while the client is 2816 * registered to the device, once the ib_client remove() callback returns this 2817 * cannot be called. 
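 *
 * For example (sketch only; my_client and struct my_state are hypothetical):
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add_one(struct ib_device *ibdev)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return;
 *		ib_set_client_data(ibdev, &my_client, st);
 *	}
 *
 *	static void my_remove_one(struct ib_device *ibdev, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add_one,
 *		.remove	= my_remove_one,
 *	};
 *
 * The client itself is registered once with ib_register_client(&my_client).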
2818 */ 2819 static inline void *ib_get_client_data(struct ib_device *device, 2820 struct ib_client *client) 2821 { 2822 return xa_load(&device->client_data, client->client_id); 2823 } 2824 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2825 void *data); 2826 void ib_set_device_ops(struct ib_device *device, 2827 const struct ib_device_ops *ops); 2828 2829 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, 2830 unsigned long pfn, unsigned long size, pgprot_t prot, 2831 struct rdma_user_mmap_entry *entry); 2832 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, 2833 struct rdma_user_mmap_entry *entry, 2834 size_t length); 2835 struct rdma_user_mmap_entry * 2836 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext, 2837 unsigned long pgoff); 2838 struct rdma_user_mmap_entry * 2839 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext, 2840 struct vm_area_struct *vma); 2841 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry); 2842 2843 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry); 2844 2845 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2846 { 2847 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2848 } 2849 2850 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2851 { 2852 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2853 } 2854 2855 static inline bool ib_is_buffer_cleared(const void __user *p, 2856 size_t len) 2857 { 2858 bool ret; 2859 u8 *buf; 2860 2861 if (len > USHRT_MAX) 2862 return false; 2863 2864 buf = memdup_user(p, len); 2865 if (IS_ERR(buf)) 2866 return false; 2867 2868 ret = !memchr_inv(buf, 0, len); 2869 kfree(buf); 2870 return ret; 2871 } 2872 2873 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2874 size_t offset, 2875 size_t len) 2876 { 2877 return ib_is_buffer_cleared(udata->inbuf + offset, len); 2878 } 2879 2880 /** 2881 * ib_is_destroy_retryable - Check whether the uobject destruction 2882 * is retryable. 2883 * @ret: The initial destruction return code 2884 * @why: remove reason 2885 * @uobj: The uobject that is destroyed 2886 * 2887 * This function is a helper function that IB layer and low-level drivers 2888 * can use to consider whether the destruction of the given uobject is 2889 * retry-able. 2890 * It checks the original return code, if it wasn't success the destruction 2891 * is retryable according to the ucontext state (i.e. cleanup_retryable) and 2892 * the remove reason. (i.e. why). 2893 * Must be called with the object locked for destroy. 2894 */ 2895 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why, 2896 struct ib_uobject *uobj) 2897 { 2898 return ret && (why == RDMA_REMOVE_DESTROY || 2899 uobj->context->cleanup_retryable); 2900 } 2901 2902 /** 2903 * ib_destroy_usecnt - Called during destruction to check the usecnt 2904 * @usecnt: The usecnt atomic 2905 * @why: remove reason 2906 * @uobj: The uobject that is destroyed 2907 * 2908 * Non-zero usecnts will block destruction unless destruction was triggered by 2909 * a ucontext cleanup. 
2910 */ 2911 static inline int ib_destroy_usecnt(atomic_t *usecnt, 2912 enum rdma_remove_reason why, 2913 struct ib_uobject *uobj) 2914 { 2915 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj)) 2916 return -EBUSY; 2917 return 0; 2918 } 2919 2920 /** 2921 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2922 * contains all required attributes and no attributes not allowed for 2923 * the given QP state transition. 2924 * @cur_state: Current QP state 2925 * @next_state: Next QP state 2926 * @type: QP type 2927 * @mask: Mask of supplied QP attributes 2928 * 2929 * This function is a helper function that a low-level driver's 2930 * modify_qp method can use to validate the consumer's input. It 2931 * checks that cur_state and next_state are valid QP states, that a 2932 * transition from cur_state to next_state is allowed by the IB spec, 2933 * and that the attribute mask supplied is allowed for the transition. 2934 */ 2935 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2936 enum ib_qp_type type, enum ib_qp_attr_mask mask); 2937 2938 void ib_register_event_handler(struct ib_event_handler *event_handler); 2939 void ib_unregister_event_handler(struct ib_event_handler *event_handler); 2940 void ib_dispatch_event(struct ib_event *event); 2941 2942 int ib_query_port(struct ib_device *device, 2943 u8 port_num, struct ib_port_attr *port_attr); 2944 2945 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2946 u8 port_num); 2947 2948 /** 2949 * rdma_cap_ib_switch - Check if the device is IB switch 2950 * @device: Device to check 2951 * 2952 * Device driver is responsible for setting is_switch bit on 2953 * in ib_device structure at init time. 2954 * 2955 * Return: true if the device is IB switch. 2956 */ 2957 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2958 { 2959 return device->is_switch; 2960 } 2961 2962 /** 2963 * rdma_start_port - Return the first valid port number for the device 2964 * specified 2965 * 2966 * @device: Device to be checked 2967 * 2968 * Return start port number 2969 */ 2970 static inline u8 rdma_start_port(const struct ib_device *device) 2971 { 2972 return rdma_cap_ib_switch(device) ? 0 : 1; 2973 } 2974 2975 /** 2976 * rdma_for_each_port - Iterate over all valid port numbers of the IB device 2977 * @device - The struct ib_device * to iterate over 2978 * @iter - The unsigned int to store the port number 2979 */ 2980 #define rdma_for_each_port(device, iter) \ 2981 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \ 2982 unsigned int, iter))); \ 2983 iter <= rdma_end_port(device); (iter)++) 2984 2985 /** 2986 * rdma_end_port - Return the last valid port number for the device 2987 * specified 2988 * 2989 * @device: Device to be checked 2990 * 2991 * Return last port number 2992 */ 2993 static inline u8 rdma_end_port(const struct ib_device *device) 2994 { 2995 return rdma_cap_ib_switch(device) ? 
0 : device->phys_port_cnt; 2996 } 2997 2998 static inline int rdma_is_port_valid(const struct ib_device *device, 2999 unsigned int port) 3000 { 3001 return (port >= rdma_start_port(device) && 3002 port <= rdma_end_port(device)); 3003 } 3004 3005 static inline bool rdma_is_grh_required(const struct ib_device *device, 3006 u8 port_num) 3007 { 3008 return device->port_data[port_num].immutable.core_cap_flags & 3009 RDMA_CORE_PORT_IB_GRH_REQUIRED; 3010 } 3011 3012 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 3013 { 3014 return device->port_data[port_num].immutable.core_cap_flags & 3015 RDMA_CORE_CAP_PROT_IB; 3016 } 3017 3018 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 3019 { 3020 return device->port_data[port_num].immutable.core_cap_flags & 3021 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 3022 } 3023 3024 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 3025 { 3026 return device->port_data[port_num].immutable.core_cap_flags & 3027 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 3028 } 3029 3030 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 3031 { 3032 return device->port_data[port_num].immutable.core_cap_flags & 3033 RDMA_CORE_CAP_PROT_ROCE; 3034 } 3035 3036 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 3037 { 3038 return device->port_data[port_num].immutable.core_cap_flags & 3039 RDMA_CORE_CAP_PROT_IWARP; 3040 } 3041 3042 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 3043 { 3044 return rdma_protocol_ib(device, port_num) || 3045 rdma_protocol_roce(device, port_num); 3046 } 3047 3048 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) 3049 { 3050 return device->port_data[port_num].immutable.core_cap_flags & 3051 RDMA_CORE_CAP_PROT_RAW_PACKET; 3052 } 3053 3054 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) 3055 { 3056 return device->port_data[port_num].immutable.core_cap_flags & 3057 RDMA_CORE_CAP_PROT_USNIC; 3058 } 3059 3060 /** 3061 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 3062 * Management Datagrams. 3063 * @device: Device to check 3064 * @port_num: Port number to check 3065 * 3066 * Management Datagrams (MAD) are a required part of the InfiniBand 3067 * specification and are supported on all InfiniBand devices. A slightly 3068 * extended version are also supported on OPA interfaces. 3069 * 3070 * Return: true if the port supports sending/receiving of MAD packets. 3071 */ 3072 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 3073 { 3074 return device->port_data[port_num].immutable.core_cap_flags & 3075 RDMA_CORE_CAP_IB_MAD; 3076 } 3077 3078 /** 3079 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 3080 * Management Datagrams. 3081 * @device: Device to check 3082 * @port_num: Port number to check 3083 * 3084 * Intel OmniPath devices extend and/or replace the InfiniBand Management 3085 * datagrams with their own versions. These OPA MADs share many but not all of 3086 * the characteristics of InfiniBand MADs. 
 *
 * OPA MADs differ in the following ways:
 *
 * 1) MADs are variable size up to 2K
 *    IBTA defined MADs remain fixed at 256 bytes
 * 2) OPA SMPs must carry valid PKeys
 * 3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access. Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI). This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric. These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination. The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI). Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface. This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability InfiniBand
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration. Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group. It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group. And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native InfiniBand Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID. RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port. Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
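 *
 * Consumers typically combine the rdma_cap_*() and rdma_protocol_*() helpers
 * to pick per-port behaviour, e.g. (sketch only; my_setup_roce_port() and
 * my_setup_ib_port() are hypothetical):
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(ibdev, port) {
 *		if (rdma_cap_eth_ah(ibdev, port))
 *			my_setup_roce_port(ibdev, port);
 *		else
 *			my_setup_ib_port(ibdev, port);
 *	}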
3245 */ 3246 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 3247 { 3248 return device->port_data[port_num].immutable.core_cap_flags & 3249 RDMA_CORE_CAP_ETH_AH; 3250 } 3251 3252 /** 3253 * rdma_cap_opa_ah - Check if the port of device supports 3254 * OPA Address handles 3255 * @device: Device to check 3256 * @port_num: Port number to check 3257 * 3258 * Return: true if we are running on an OPA device which supports 3259 * the extended OPA addressing. 3260 */ 3261 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num) 3262 { 3263 return (device->port_data[port_num].immutable.core_cap_flags & 3264 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; 3265 } 3266 3267 /** 3268 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 3269 * 3270 * @device: Device 3271 * @port_num: Port number 3272 * 3273 * This MAD size includes the MAD headers and MAD payload. No other headers 3274 * are included. 3275 * 3276 * Return the max MAD size required by the Port. Will return 0 if the port 3277 * does not support MADs 3278 */ 3279 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 3280 { 3281 return device->port_data[port_num].immutable.max_mad_size; 3282 } 3283 3284 /** 3285 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 3286 * @device: Device to check 3287 * @port_num: Port number to check 3288 * 3289 * RoCE GID table mechanism manages the various GIDs for a device. 3290 * 3291 * NOTE: if allocating the port's GID table has failed, this call will still 3292 * return true, but any RoCE GID table API will fail. 3293 * 3294 * Return: true if the port uses RoCE GID table mechanism in order to manage 3295 * its GIDs. 3296 */ 3297 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 3298 u8 port_num) 3299 { 3300 return rdma_protocol_roce(device, port_num) && 3301 device->ops.add_gid && device->ops.del_gid; 3302 } 3303 3304 /* 3305 * Check if the device supports READ W/ INVALIDATE. 3306 */ 3307 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 3308 { 3309 /* 3310 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 3311 * has support for it yet. 
3312 */ 3313 return rdma_protocol_iwarp(dev, port_num); 3314 } 3315 3316 /** 3317 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes 3318 * 3319 * @addr: address 3320 * @pgsz_bitmap: bitmap of HW supported page sizes 3321 */ 3322 static inline unsigned int rdma_find_pg_bit(unsigned long addr, 3323 unsigned long pgsz_bitmap) 3324 { 3325 unsigned long align; 3326 unsigned long pgsz; 3327 3328 align = addr & -addr; 3329 3330 /* Find page bit such that addr is aligned to the highest supported 3331 * HW page size 3332 */ 3333 pgsz = pgsz_bitmap & ~(-align << 1); 3334 if (!pgsz) 3335 return __ffs(pgsz_bitmap); 3336 3337 return __fls(pgsz); 3338 } 3339 3340 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 3341 int state); 3342 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 3343 struct ifla_vf_info *info); 3344 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 3345 struct ifla_vf_stats *stats); 3346 int ib_get_vf_guid(struct ib_device *device, int vf, u8 port, 3347 struct ifla_vf_guid *node_guid, 3348 struct ifla_vf_guid *port_guid); 3349 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 3350 int type); 3351 3352 int ib_query_pkey(struct ib_device *device, 3353 u8 port_num, u16 index, u16 *pkey); 3354 3355 int ib_modify_device(struct ib_device *device, 3356 int device_modify_mask, 3357 struct ib_device_modify *device_modify); 3358 3359 int ib_modify_port(struct ib_device *device, 3360 u8 port_num, int port_modify_mask, 3361 struct ib_port_modify *port_modify); 3362 3363 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 3364 u8 *port_num, u16 *index); 3365 3366 int ib_find_pkey(struct ib_device *device, 3367 u8 port_num, u16 pkey, u16 *index); 3368 3369 enum ib_pd_flags { 3370 /* 3371 * Create a memory registration for all memory in the system and place 3372 * the rkey for it into pd->unsafe_global_rkey. This can be used by 3373 * ULPs to avoid the overhead of dynamic MRs. 3374 * 3375 * This flag is generally considered unsafe and must only be used in 3376 * extremly trusted environments. Every use of it will log a warning 3377 * in the kernel log. 3378 */ 3379 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 3380 }; 3381 3382 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 3383 const char *caller); 3384 3385 #define ib_alloc_pd(device, flags) \ 3386 __ib_alloc_pd((device), (flags), KBUILD_MODNAME) 3387 3388 /** 3389 * ib_dealloc_pd_user - Deallocate kernel/user PD 3390 * @pd: The protection domain 3391 * @udata: Valid user data or NULL for kernel objects 3392 */ 3393 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); 3394 3395 /** 3396 * ib_dealloc_pd - Deallocate kernel PD 3397 * @pd: The protection domain 3398 * 3399 * NOTE: for user PD use ib_dealloc_pd_user with valid udata! 3400 */ 3401 static inline void ib_dealloc_pd(struct ib_pd *pd) 3402 { 3403 ib_dealloc_pd_user(pd, NULL); 3404 } 3405 3406 enum rdma_create_ah_flags { 3407 /* In a sleepable context */ 3408 RDMA_CREATE_AH_SLEEPABLE = BIT(0), 3409 }; 3410 3411 /** 3412 * rdma_create_ah - Creates an address handle for the given address vector. 3413 * @pd: The protection domain associated with the address handle. 3414 * @ah_attr: The attributes of the address vector. 3415 * @flags: Create address handle flags (see enum rdma_create_ah_flags). 3416 * 3417 * The address handle is used to reference a local or global destination 3418 * in all UD QP post sends. 
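 *
 * A minimal IB-link-layer example (sketch only; pd and dlid come from the
 * caller, and only the basic attributes are set):
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = RDMA_AH_ATTR_TYPE_IB;
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_port_num(&attr, 1);
 *
 *	ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *
 * The handle is later released with rdma_destroy_ah(ah,
 * RDMA_DESTROY_AH_SLEEPABLE).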
3419 */ 3420 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, 3421 u32 flags); 3422 3423 /** 3424 * rdma_create_user_ah - Creates an address handle for the given address vector. 3425 * It resolves destination mac address for ah attribute of RoCE type. 3426 * @pd: The protection domain associated with the address handle. 3427 * @ah_attr: The attributes of the address vector. 3428 * @udata: pointer to user's input output buffer information need by 3429 * provider driver. 3430 * 3431 * It returns 0 on success and returns appropriate error code on error. 3432 * The address handle is used to reference a local or global destination 3433 * in all UD QP post sends. 3434 */ 3435 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, 3436 struct rdma_ah_attr *ah_attr, 3437 struct ib_udata *udata); 3438 /** 3439 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header 3440 * work completion. 3441 * @hdr: the L3 header to parse 3442 * @net_type: type of header to parse 3443 * @sgid: place to store source gid 3444 * @dgid: place to store destination gid 3445 */ 3446 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 3447 enum rdma_network_type net_type, 3448 union ib_gid *sgid, union ib_gid *dgid); 3449 3450 /** 3451 * ib_get_rdma_header_version - Get the header version 3452 * @hdr: the L3 header to parse 3453 */ 3454 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); 3455 3456 /** 3457 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a 3458 * work completion. 3459 * @device: Device on which the received message arrived. 3460 * @port_num: Port on which the received message arrived. 3461 * @wc: Work completion associated with the received message. 3462 * @grh: References the received global route header. This parameter is 3463 * ignored unless the work completion indicates that the GRH is valid. 3464 * @ah_attr: Returned attributes that can be used when creating an address 3465 * handle for replying to the message. 3466 * When ib_init_ah_attr_from_wc() returns success, 3467 * (a) for IB link layer it optionally contains a reference to SGID attribute 3468 * when GRH is present for IB link layer. 3469 * (b) for RoCE link layer it contains a reference to SGID attribute. 3470 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID 3471 * attributes which are initialized using ib_init_ah_attr_from_wc(). 3472 * 3473 */ 3474 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, 3475 const struct ib_wc *wc, const struct ib_grh *grh, 3476 struct rdma_ah_attr *ah_attr); 3477 3478 /** 3479 * ib_create_ah_from_wc - Creates an address handle associated with the 3480 * sender of the specified work completion. 3481 * @pd: The protection domain associated with the address handle. 3482 * @wc: Work completion information associated with a received message. 3483 * @grh: References the received global route header. This parameter is 3484 * ignored unless the work completion indicates that the GRH is valid. 3485 * @port_num: The outbound port number to associate with the address. 3486 * 3487 * The address handle is used to reference a local or global destination 3488 * in all UD QP post sends. 3489 */ 3490 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 3491 const struct ib_grh *grh, u8 port_num); 3492 3493 /** 3494 * rdma_modify_ah - Modifies the address vector associated with an address 3495 * handle. 3496 * @ah: The address handle to modify. 
 * @ah_attr: The new address vector attributes to associate with the
 * address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 * handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 * handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

enum rdma_destroy_ah_flags {
	/* In a sleepable context */
	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
};

/**
 * rdma_destroy_ah_user - Destroys an address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 * @udata: Valid user data or NULL for kernel objects
 */
int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);

/**
 * rdma_destroy_ah - Destroys a kernel address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 *
 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
 */
static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return rdma_destroy_ah_user(ah, flags, NULL);
}

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 * SRQ. If SRQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
 * the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 * are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 * specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq_user - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 * @udata: Valid user data or NULL for kernel objects
 */
int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);

/**
 * ib_destroy_srq - Destroys the specified kernel SRQ.
 * @srq: The SRQ to destroy.
 *
 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
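 *
 * A typical kernel SRQ lifecycle looks roughly as follows (sketch only;
 * dma_addr and buf_len describe a caller-provided receive buffer):
 *
 *	struct ib_srq_init_attr sattr = {
 *		.srq_type = IB_SRQT_BASIC,
 *		.attr = { .max_wr = 128, .max_sge = 1 },
 *	};
 *	struct ib_sge sge = {
 *		.addr = dma_addr,
 *		.length = buf_len,
 *		.lkey = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = { .sg_list = &sge, .num_sge = 1 };
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &sattr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	ib_post_srq_recv(srq, &wr, NULL);
 *
 * Once every QP that references the SRQ has been destroyed, the SRQ itself
 * is torn down with ib_destroy_srq().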
3589 */ 3590 static inline int ib_destroy_srq(struct ib_srq *srq) 3591 { 3592 return ib_destroy_srq_user(srq, NULL); 3593 } 3594 3595 /** 3596 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 3597 * @srq: The SRQ to post the work request on. 3598 * @recv_wr: A list of work requests to post on the receive queue. 3599 * @bad_recv_wr: On an immediate failure, this parameter will reference 3600 * the work request that failed to be posted on the QP. 3601 */ 3602 static inline int ib_post_srq_recv(struct ib_srq *srq, 3603 const struct ib_recv_wr *recv_wr, 3604 const struct ib_recv_wr **bad_recv_wr) 3605 { 3606 const struct ib_recv_wr *dummy; 3607 3608 return srq->device->ops.post_srq_recv(srq, recv_wr, 3609 bad_recv_wr ? : &dummy); 3610 } 3611 3612 /** 3613 * ib_create_qp_user - Creates a QP associated with the specified protection 3614 * domain. 3615 * @pd: The protection domain associated with the QP. 3616 * @qp_init_attr: A list of initial attributes required to create the 3617 * QP. If QP creation succeeds, then the attributes are updated to 3618 * the actual capabilities of the created QP. 3619 * @udata: Valid user data or NULL for kernel objects 3620 */ 3621 struct ib_qp *ib_create_qp_user(struct ib_pd *pd, 3622 struct ib_qp_init_attr *qp_init_attr, 3623 struct ib_udata *udata); 3624 3625 /** 3626 * ib_create_qp - Creates a kernel QP associated with the specified protection 3627 * domain. 3628 * @pd: The protection domain associated with the QP. 3629 * @qp_init_attr: A list of initial attributes required to create the 3630 * QP. If QP creation succeeds, then the attributes are updated to 3631 * the actual capabilities of the created QP. 3632 * @udata: Valid user data or NULL for kernel objects 3633 * 3634 * NOTE: for user qp use ib_create_qp_user with valid udata! 3635 */ 3636 static inline struct ib_qp *ib_create_qp(struct ib_pd *pd, 3637 struct ib_qp_init_attr *qp_init_attr) 3638 { 3639 return ib_create_qp_user(pd, qp_init_attr, NULL); 3640 } 3641 3642 /** 3643 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. 3644 * @qp: The QP to modify. 3645 * @attr: On input, specifies the QP attributes to modify. On output, 3646 * the current values of selected QP attributes are returned. 3647 * @attr_mask: A bit-mask used to specify which attributes of the QP 3648 * are being modified. 3649 * @udata: pointer to user's input output buffer information 3650 * are being modified. 3651 * It returns 0 on success and returns appropriate error code on error. 3652 */ 3653 int ib_modify_qp_with_udata(struct ib_qp *qp, 3654 struct ib_qp_attr *attr, 3655 int attr_mask, 3656 struct ib_udata *udata); 3657 3658 /** 3659 * ib_modify_qp - Modifies the attributes for the specified QP and then 3660 * transitions the QP to the given state. 3661 * @qp: The QP to modify. 3662 * @qp_attr: On input, specifies the QP attributes to modify. On output, 3663 * the current values of selected QP attributes are returned. 3664 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 3665 * are being modified. 3666 */ 3667 int ib_modify_qp(struct ib_qp *qp, 3668 struct ib_qp_attr *qp_attr, 3669 int qp_attr_mask); 3670 3671 /** 3672 * ib_query_qp - Returns the attribute list and current values for the 3673 * specified QP. 3674 * @qp: The QP to query. 3675 * @qp_attr: The attributes of the specified QP. 3676 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 3677 * @qp_init_attr: Additional attributes of the selected QP. 
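 *
 * For example, a kernel ULP drives the QP state machine with ib_modify_qp()
 * and can read the resulting attributes back with ib_query_qp(). A
 * RESET->INIT transition might look like this (sketch only; the port number
 * and access flags depend on the device and ULP):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				      IB_QP_PORT | IB_QP_ACCESS_FLAGS);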
3678 *
3679 * The qp_attr_mask may be used to limit the query to gathering only the
3680 * selected attributes.
3681 */
3682 int ib_query_qp(struct ib_qp *qp,
3683 		struct ib_qp_attr *qp_attr,
3684 		int qp_attr_mask,
3685 		struct ib_qp_init_attr *qp_init_attr);
3686
3687 /**
3688 * ib_destroy_qp_user - Destroys the specified QP.
3689 * @qp: The QP to destroy.
3690 * @udata: Valid user data or NULL for kernel objects
3691 */
3692 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3693
3694 /**
3695 * ib_destroy_qp - Destroys the specified kernel QP.
3696 * @qp: The QP to destroy.
3697 *
3698 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3699 */
3700 static inline int ib_destroy_qp(struct ib_qp *qp)
3701 {
3702 	return ib_destroy_qp_user(qp, NULL);
3703 }
3704
3705 /**
3706 * ib_open_qp - Obtain a reference to an existing sharable QP.
3707 * @xrcd: XRC domain
3708 * @qp_open_attr: Attributes identifying the QP to open.
3709 *
3710 * Returns a reference to a sharable QP.
3711 */
3712 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3713 			 struct ib_qp_open_attr *qp_open_attr);
3714
3715 /**
3716 * ib_close_qp - Release an external reference to a QP.
3717 * @qp: The QP handle to release
3718 *
3719 * The opened QP handle is released by the caller.  The underlying
3720 * shared QP is not destroyed until all internal references are released.
3721 */
3722 int ib_close_qp(struct ib_qp *qp);
3723
3724 /**
3725 * ib_post_send - Posts a list of work requests to the send queue of
3726 *   the specified QP.
3727 * @qp: The QP to post the work request on.
3728 * @send_wr: A list of work requests to post on the send queue.
3729 * @bad_send_wr: On an immediate failure, this parameter will reference
3730 *   the work request that failed to be posted on the QP.
3731 *
3732 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3733 * error is returned, the QP state shall not be affected,
3734 * ib_post_send() will return an immediate error after queueing any
3735 * earlier work requests in the list.
3736 */
3737 static inline int ib_post_send(struct ib_qp *qp,
3738 			       const struct ib_send_wr *send_wr,
3739 			       const struct ib_send_wr **bad_send_wr)
3740 {
3741 	const struct ib_send_wr *dummy;
3742
3743 	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3744 }
3745
3746 /**
3747 * ib_post_recv - Posts a list of work requests to the receive queue of
3748 *   the specified QP.
3749 * @qp: The QP to post the work request on.
3750 * @recv_wr: A list of work requests to post on the receive queue.
3751 * @bad_recv_wr: On an immediate failure, this parameter will reference
3752 *   the work request that failed to be posted on the QP.
3753 */
3754 static inline int ib_post_recv(struct ib_qp *qp,
3755 			       const struct ib_recv_wr *recv_wr,
3756 			       const struct ib_recv_wr **bad_recv_wr)
3757 {
3758 	const struct ib_recv_wr *dummy;
3759
3760 	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ?
: &dummy); 3761 } 3762 3763 struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, 3764 int nr_cqe, int comp_vector, 3765 enum ib_poll_context poll_ctx, 3766 const char *caller, struct ib_udata *udata); 3767 3768 /** 3769 * ib_alloc_cq_user: Allocate kernel/user CQ 3770 * @dev: The IB device 3771 * @private: Private data attached to the CQE 3772 * @nr_cqe: Number of CQEs in the CQ 3773 * @comp_vector: Completion vector used for the IRQs 3774 * @poll_ctx: Context used for polling the CQ 3775 * @udata: Valid user data or NULL for kernel objects 3776 */ 3777 static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev, 3778 void *private, int nr_cqe, 3779 int comp_vector, 3780 enum ib_poll_context poll_ctx, 3781 struct ib_udata *udata) 3782 { 3783 return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, 3784 KBUILD_MODNAME, udata); 3785 } 3786 3787 /** 3788 * ib_alloc_cq: Allocate kernel CQ 3789 * @dev: The IB device 3790 * @private: Private data attached to the CQE 3791 * @nr_cqe: Number of CQEs in the CQ 3792 * @comp_vector: Completion vector used for the IRQs 3793 * @poll_ctx: Context used for polling the CQ 3794 * 3795 * NOTE: for user cq use ib_alloc_cq_user with valid udata! 3796 */ 3797 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 3798 int nr_cqe, int comp_vector, 3799 enum ib_poll_context poll_ctx) 3800 { 3801 return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, 3802 NULL); 3803 } 3804 3805 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, 3806 int nr_cqe, enum ib_poll_context poll_ctx, 3807 const char *caller); 3808 3809 /** 3810 * ib_alloc_cq_any: Allocate kernel CQ 3811 * @dev: The IB device 3812 * @private: Private data attached to the CQE 3813 * @nr_cqe: Number of CQEs in the CQ 3814 * @poll_ctx: Context used for polling the CQ 3815 */ 3816 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, 3817 void *private, int nr_cqe, 3818 enum ib_poll_context poll_ctx) 3819 { 3820 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx, 3821 KBUILD_MODNAME); 3822 } 3823 3824 /** 3825 * ib_free_cq_user - Free kernel/user CQ 3826 * @cq: The CQ to free 3827 * @udata: Valid user data or NULL for kernel objects 3828 */ 3829 void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata); 3830 3831 /** 3832 * ib_free_cq - Free kernel CQ 3833 * @cq: The CQ to free 3834 * 3835 * NOTE: for user cq use ib_free_cq_user with valid udata! 3836 */ 3837 static inline void ib_free_cq(struct ib_cq *cq) 3838 { 3839 ib_free_cq_user(cq, NULL); 3840 } 3841 3842 int ib_process_cq_direct(struct ib_cq *cq, int budget); 3843 3844 /** 3845 * ib_create_cq - Creates a CQ on the specified device. 3846 * @device: The device on which to create the CQ. 3847 * @comp_handler: A user-specified callback that is invoked when a 3848 * completion event occurs on the CQ. 3849 * @event_handler: A user-specified callback that is invoked when an 3850 * asynchronous event not associated with a completion occurs on the CQ. 3851 * @cq_context: Context associated with the CQ returned to the user via 3852 * the associated completion and event handlers. 3853 * @cq_attr: The attributes the CQ should be created upon. 3854 * 3855 * Users can examine the cq structure to determine the actual CQ size. 
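 *
 * A minimal kernel-side sketch (illustrative; my_comp_handler,
 * my_event_handler and my_ctx are assumptions, not part of this header):
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler,
 *					my_event_handler, my_ctx, &cq_attr);
 *
 *	if (!IS_ERR(cq))
 *		ib_destroy_cq(cq);
 *
 * Many in-kernel consumers use ib_alloc_cq() above instead.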
3856 */ 3857 struct ib_cq *__ib_create_cq(struct ib_device *device, 3858 ib_comp_handler comp_handler, 3859 void (*event_handler)(struct ib_event *, void *), 3860 void *cq_context, 3861 const struct ib_cq_init_attr *cq_attr, 3862 const char *caller); 3863 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \ 3864 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME) 3865 3866 /** 3867 * ib_resize_cq - Modifies the capacity of the CQ. 3868 * @cq: The CQ to resize. 3869 * @cqe: The minimum size of the CQ. 3870 * 3871 * Users can examine the cq structure to determine the actual CQ size. 3872 */ 3873 int ib_resize_cq(struct ib_cq *cq, int cqe); 3874 3875 /** 3876 * rdma_set_cq_moderation - Modifies moderation params of the CQ 3877 * @cq: The CQ to modify. 3878 * @cq_count: number of CQEs that will trigger an event 3879 * @cq_period: max period of time in usec before triggering an event 3880 * 3881 */ 3882 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period); 3883 3884 /** 3885 * ib_destroy_cq_user - Destroys the specified CQ. 3886 * @cq: The CQ to destroy. 3887 * @udata: Valid user data or NULL for kernel objects 3888 */ 3889 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); 3890 3891 /** 3892 * ib_destroy_cq - Destroys the specified kernel CQ. 3893 * @cq: The CQ to destroy. 3894 * 3895 * NOTE: for user cq use ib_destroy_cq_user with valid udata! 3896 */ 3897 static inline void ib_destroy_cq(struct ib_cq *cq) 3898 { 3899 ib_destroy_cq_user(cq, NULL); 3900 } 3901 3902 /** 3903 * ib_poll_cq - poll a CQ for completion(s) 3904 * @cq:the CQ being polled 3905 * @num_entries:maximum number of completions to return 3906 * @wc:array of at least @num_entries &struct ib_wc where completions 3907 * will be returned 3908 * 3909 * Poll a CQ for (possibly multiple) completions. If the return value 3910 * is < 0, an error occurred. If the return value is >= 0, it is the 3911 * number of completions returned. If the return value is 3912 * non-negative and < num_entries, then the CQ was emptied. 3913 */ 3914 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 3915 struct ib_wc *wc) 3916 { 3917 return cq->device->ops.poll_cq(cq, num_entries, wc); 3918 } 3919 3920 /** 3921 * ib_req_notify_cq - Request completion notification on a CQ. 3922 * @cq: The CQ to generate an event for. 3923 * @flags: 3924 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 3925 * to request an event on the next solicited event or next work 3926 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 3927 * may also be |ed in to request a hint about missed events, as 3928 * described below. 3929 * 3930 * Return Value: 3931 * < 0 means an error occurred while requesting notification 3932 * == 0 means notification was requested successfully, and if 3933 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 3934 * were missed and it is safe to wait for another event. In 3935 * this case is it guaranteed that any work completions added 3936 * to the CQ since the last CQ poll will trigger a completion 3937 * notification event. 3938 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 3939 * in. It means that the consumer must poll the CQ again to 3940 * make sure it is empty to avoid missing an event because of a 3941 * race between requesting notification and an entry being 3942 * added to the CQ. 
This return value means it is possible 3943 * (but not guaranteed) that a work completion has been added 3944 * to the CQ since the last poll without triggering a 3945 * completion notification event. 3946 */ 3947 static inline int ib_req_notify_cq(struct ib_cq *cq, 3948 enum ib_cq_notify_flags flags) 3949 { 3950 return cq->device->ops.req_notify_cq(cq, flags); 3951 } 3952 3953 /** 3954 * ib_req_ncomp_notif - Request completion notification when there are 3955 * at least the specified number of unreaped completions on the CQ. 3956 * @cq: The CQ to generate an event for. 3957 * @wc_cnt: The number of unreaped completions that should be on the 3958 * CQ before an event is generated. 3959 */ 3960 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3961 { 3962 return cq->device->ops.req_ncomp_notif ? 3963 cq->device->ops.req_ncomp_notif(cq, wc_cnt) : 3964 -ENOSYS; 3965 } 3966 3967 /** 3968 * ib_dma_mapping_error - check a DMA addr for error 3969 * @dev: The device for which the dma_addr was created 3970 * @dma_addr: The DMA address to check 3971 */ 3972 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3973 { 3974 return dma_mapping_error(dev->dma_device, dma_addr); 3975 } 3976 3977 /** 3978 * ib_dma_map_single - Map a kernel virtual address to DMA address 3979 * @dev: The device for which the dma_addr is to be created 3980 * @cpu_addr: The kernel virtual address 3981 * @size: The size of the region in bytes 3982 * @direction: The direction of the DMA 3983 */ 3984 static inline u64 ib_dma_map_single(struct ib_device *dev, 3985 void *cpu_addr, size_t size, 3986 enum dma_data_direction direction) 3987 { 3988 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3989 } 3990 3991 /** 3992 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 3993 * @dev: The device for which the DMA address was created 3994 * @addr: The DMA address 3995 * @size: The size of the region in bytes 3996 * @direction: The direction of the DMA 3997 */ 3998 static inline void ib_dma_unmap_single(struct ib_device *dev, 3999 u64 addr, size_t size, 4000 enum dma_data_direction direction) 4001 { 4002 dma_unmap_single(dev->dma_device, addr, size, direction); 4003 } 4004 4005 /** 4006 * ib_dma_map_page - Map a physical page to DMA address 4007 * @dev: The device for which the dma_addr is to be created 4008 * @page: The page to be mapped 4009 * @offset: The offset within the page 4010 * @size: The size of the region in bytes 4011 * @direction: The direction of the DMA 4012 */ 4013 static inline u64 ib_dma_map_page(struct ib_device *dev, 4014 struct page *page, 4015 unsigned long offset, 4016 size_t size, 4017 enum dma_data_direction direction) 4018 { 4019 return dma_map_page(dev->dma_device, page, offset, size, direction); 4020 } 4021 4022 /** 4023 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 4024 * @dev: The device for which the DMA address was created 4025 * @addr: The DMA address 4026 * @size: The size of the region in bytes 4027 * @direction: The direction of the DMA 4028 */ 4029 static inline void ib_dma_unmap_page(struct ib_device *dev, 4030 u64 addr, size_t size, 4031 enum dma_data_direction direction) 4032 { 4033 dma_unmap_page(dev->dma_device, addr, size, direction); 4034 } 4035 4036 /** 4037 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 4038 * @dev: The device for which the DMA addresses are to be created 4039 * @sg: The array of scatter/gather entries 4040 * @nents: The number of scatter/gather entries 4041 
* @direction: The direction of the DMA 4042 */ 4043 static inline int ib_dma_map_sg(struct ib_device *dev, 4044 struct scatterlist *sg, int nents, 4045 enum dma_data_direction direction) 4046 { 4047 return dma_map_sg(dev->dma_device, sg, nents, direction); 4048 } 4049 4050 /** 4051 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 4052 * @dev: The device for which the DMA addresses were created 4053 * @sg: The array of scatter/gather entries 4054 * @nents: The number of scatter/gather entries 4055 * @direction: The direction of the DMA 4056 */ 4057 static inline void ib_dma_unmap_sg(struct ib_device *dev, 4058 struct scatterlist *sg, int nents, 4059 enum dma_data_direction direction) 4060 { 4061 dma_unmap_sg(dev->dma_device, sg, nents, direction); 4062 } 4063 4064 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 4065 struct scatterlist *sg, int nents, 4066 enum dma_data_direction direction, 4067 unsigned long dma_attrs) 4068 { 4069 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 4070 dma_attrs); 4071 } 4072 4073 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 4074 struct scatterlist *sg, int nents, 4075 enum dma_data_direction direction, 4076 unsigned long dma_attrs) 4077 { 4078 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); 4079 } 4080 4081 /** 4082 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer 4083 * @dev: The device to query 4084 * 4085 * The returned value represents a size in bytes. 4086 */ 4087 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev) 4088 { 4089 return dma_get_max_seg_size(dev->dma_device); 4090 } 4091 4092 /** 4093 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 4094 * @dev: The device for which the DMA address was created 4095 * @addr: The DMA address 4096 * @size: The size of the region in bytes 4097 * @dir: The direction of the DMA 4098 */ 4099 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 4100 u64 addr, 4101 size_t size, 4102 enum dma_data_direction dir) 4103 { 4104 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 4105 } 4106 4107 /** 4108 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 4109 * @dev: The device for which the DMA address was created 4110 * @addr: The DMA address 4111 * @size: The size of the region in bytes 4112 * @dir: The direction of the DMA 4113 */ 4114 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 4115 u64 addr, 4116 size_t size, 4117 enum dma_data_direction dir) 4118 { 4119 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 4120 } 4121 4122 /** 4123 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 4124 * @dev: The device for which the DMA address is requested 4125 * @size: The size of the region to allocate in bytes 4126 * @dma_handle: A pointer for returning the DMA address of the region 4127 * @flag: memory allocator flags 4128 */ 4129 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 4130 size_t size, 4131 dma_addr_t *dma_handle, 4132 gfp_t flag) 4133 { 4134 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); 4135 } 4136 4137 /** 4138 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 4139 * @dev: The device for which the DMA addresses were allocated 4140 * @size: The size of the region 4141 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 4142 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 4143 */ 4144 
static inline void ib_dma_free_coherent(struct ib_device *dev, 4145 size_t size, void *cpu_addr, 4146 dma_addr_t dma_handle) 4147 { 4148 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 4149 } 4150 4151 /** 4152 * ib_dereg_mr_user - Deregisters a memory region and removes it from the 4153 * HCA translation table. 4154 * @mr: The memory region to deregister. 4155 * @udata: Valid user data or NULL for kernel object 4156 * 4157 * This function can fail, if the memory region has memory windows bound to it. 4158 */ 4159 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata); 4160 4161 /** 4162 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the 4163 * HCA translation table. 4164 * @mr: The memory region to deregister. 4165 * 4166 * This function can fail, if the memory region has memory windows bound to it. 4167 * 4168 * NOTE: for user mr use ib_dereg_mr_user with valid udata! 4169 */ 4170 static inline int ib_dereg_mr(struct ib_mr *mr) 4171 { 4172 return ib_dereg_mr_user(mr, NULL); 4173 } 4174 4175 struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type, 4176 u32 max_num_sg, struct ib_udata *udata); 4177 4178 static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 4179 enum ib_mr_type mr_type, u32 max_num_sg) 4180 { 4181 return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL); 4182 } 4183 4184 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, 4185 u32 max_num_data_sg, 4186 u32 max_num_meta_sg); 4187 4188 /** 4189 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 4190 * R_Key and L_Key. 4191 * @mr - struct ib_mr pointer to be updated. 4192 * @newkey - new key to be used. 4193 */ 4194 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 4195 { 4196 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 4197 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 4198 } 4199 4200 /** 4201 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 4202 * for calculating a new rkey for type 2 memory windows. 4203 * @rkey - the rkey to increment. 4204 */ 4205 static inline u32 ib_inc_rkey(u32 rkey) 4206 { 4207 const u32 mask = 0x000000ff; 4208 return ((rkey + 1) & mask) | (rkey & ~mask); 4209 } 4210 4211 /** 4212 * ib_alloc_fmr - Allocates a unmapped fast memory region. 4213 * @pd: The protection domain associated with the unmapped region. 4214 * @mr_access_flags: Specifies the memory access rights. 4215 * @fmr_attr: Attributes of the unmapped region. 4216 * 4217 * A fast memory region must be mapped before it can be used as part of 4218 * a work request. 4219 */ 4220 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 4221 int mr_access_flags, 4222 struct ib_fmr_attr *fmr_attr); 4223 4224 /** 4225 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 4226 * @fmr: The fast memory region to associate with the pages. 4227 * @page_list: An array of physical pages to map to the fast memory region. 4228 * @list_len: The number of pages in page_list. 4229 * @iova: The I/O virtual address to use with the mapped region. 4230 */ 4231 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 4232 u64 *page_list, int list_len, 4233 u64 iova) 4234 { 4235 return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova); 4236 } 4237 4238 /** 4239 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 4240 * @fmr_list: A linked list of fast memory regions to unmap. 
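 *
 * A minimal sketch (illustrative; assumes @fmr was previously mapped with
 * ib_map_phys_fmr()):
 *
 *	LIST_HEAD(fmr_list);
 *
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);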
4241 */ 4242 int ib_unmap_fmr(struct list_head *fmr_list); 4243 4244 /** 4245 * ib_dealloc_fmr - Deallocates a fast memory region. 4246 * @fmr: The fast memory region to deallocate. 4247 */ 4248 int ib_dealloc_fmr(struct ib_fmr *fmr); 4249 4250 /** 4251 * ib_attach_mcast - Attaches the specified QP to a multicast group. 4252 * @qp: QP to attach to the multicast group. The QP must be type 4253 * IB_QPT_UD. 4254 * @gid: Multicast group GID. 4255 * @lid: Multicast group LID in host byte order. 4256 * 4257 * In order to send and receive multicast packets, subnet 4258 * administration must have created the multicast group and configured 4259 * the fabric appropriately. The port associated with the specified 4260 * QP must also be a member of the multicast group. 4261 */ 4262 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 4263 4264 /** 4265 * ib_detach_mcast - Detaches the specified QP from a multicast group. 4266 * @qp: QP to detach from the multicast group. 4267 * @gid: Multicast group GID. 4268 * @lid: Multicast group LID in host byte order. 4269 */ 4270 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 4271 4272 /** 4273 * ib_alloc_xrcd - Allocates an XRC domain. 4274 * @device: The device on which to allocate the XRC domain. 4275 * @caller: Module name for kernel consumers 4276 */ 4277 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller); 4278 #define ib_alloc_xrcd(device) \ 4279 __ib_alloc_xrcd((device), KBUILD_MODNAME) 4280 4281 /** 4282 * ib_dealloc_xrcd - Deallocates an XRC domain. 4283 * @xrcd: The XRC domain to deallocate. 4284 * @udata: Valid user data or NULL for kernel object 4285 */ 4286 int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); 4287 4288 static inline int ib_check_mr_access(int flags) 4289 { 4290 /* 4291 * Local write permission is required if remote write or 4292 * remote atomic permission is also requested. 4293 */ 4294 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 4295 !(flags & IB_ACCESS_LOCAL_WRITE)) 4296 return -EINVAL; 4297 4298 return 0; 4299 } 4300 4301 static inline bool ib_access_writable(int access_flags) 4302 { 4303 /* 4304 * We have writable memory backing the MR if any of the following 4305 * access flags are set. "Local write" and "remote write" obviously 4306 * require write access. "Remote atomic" can do things like fetch and 4307 * add, which will modify memory, and "MW bind" can change permissions 4308 * by binding a window. 4309 */ 4310 return access_flags & 4311 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | 4312 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND); 4313 } 4314 4315 /** 4316 * ib_check_mr_status: lightweight check of MR status. 4317 * This routine may provide status checks on a selected 4318 * ib_mr. first use is for signature status check. 4319 * 4320 * @mr: A memory region. 4321 * @check_mask: Bitmask of which checks to perform from 4322 * ib_mr_status_check enumeration. 4323 * @mr_status: The container of relevant status checks. 4324 * failed checks will be indicated in the status bitmask 4325 * and the relevant info shall be in the error item. 4326 */ 4327 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 4328 struct ib_mr_status *mr_status); 4329 4330 /** 4331 * ib_device_try_get: Hold a registration lock 4332 * device: The device to lock 4333 * 4334 * A device under an active registration lock cannot become unregistered. 
It 4335 * is only possible to obtain a registration lock on a device that is fully 4336 * registered, otherwise this function returns false. 4337 * 4338 * The registration lock is only necessary for actions which require the 4339 * device to still be registered. Uses that only require the device pointer to 4340 * be valid should use get_device(&ibdev->dev) to hold the memory. 4341 * 4342 */ 4343 static inline bool ib_device_try_get(struct ib_device *dev) 4344 { 4345 return refcount_inc_not_zero(&dev->refcount); 4346 } 4347 4348 void ib_device_put(struct ib_device *device); 4349 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, 4350 enum rdma_driver_id driver_id); 4351 struct ib_device *ib_device_get_by_name(const char *name, 4352 enum rdma_driver_id driver_id); 4353 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 4354 u16 pkey, const union ib_gid *gid, 4355 const struct sockaddr *addr); 4356 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 4357 unsigned int port); 4358 struct net_device *ib_device_netdev(struct ib_device *dev, u8 port); 4359 4360 struct ib_wq *ib_create_wq(struct ib_pd *pd, 4361 struct ib_wq_init_attr *init_attr); 4362 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); 4363 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 4364 u32 wq_attr_mask); 4365 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 4366 struct ib_rwq_ind_table_init_attr* 4367 wq_ind_table_init_attr); 4368 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 4369 4370 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 4371 unsigned int *sg_offset, unsigned int page_size); 4372 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, 4373 int data_sg_nents, unsigned int *data_sg_offset, 4374 struct scatterlist *meta_sg, int meta_sg_nents, 4375 unsigned int *meta_sg_offset, unsigned int page_size); 4376 4377 static inline int 4378 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 4379 unsigned int *sg_offset, unsigned int page_size) 4380 { 4381 int n; 4382 4383 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 4384 mr->iova = 0; 4385 4386 return n; 4387 } 4388 4389 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 4390 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 4391 4392 void ib_drain_rq(struct ib_qp *qp); 4393 void ib_drain_sq(struct ib_qp *qp); 4394 void ib_drain_qp(struct ib_qp *qp); 4395 4396 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width); 4397 4398 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) 4399 { 4400 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE) 4401 return attr->roce.dmac; 4402 return NULL; 4403 } 4404 4405 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid) 4406 { 4407 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4408 attr->ib.dlid = (u16)dlid; 4409 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4410 attr->opa.dlid = dlid; 4411 } 4412 4413 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr) 4414 { 4415 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4416 return attr->ib.dlid; 4417 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4418 return attr->opa.dlid; 4419 return 0; 4420 } 4421 4422 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl) 4423 { 4424 attr->sl = sl; 4425 } 4426 4427 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr) 4428 { 4429 return 
attr->sl; 4430 } 4431 4432 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr, 4433 u8 src_path_bits) 4434 { 4435 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4436 attr->ib.src_path_bits = src_path_bits; 4437 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4438 attr->opa.src_path_bits = src_path_bits; 4439 } 4440 4441 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr) 4442 { 4443 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4444 return attr->ib.src_path_bits; 4445 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4446 return attr->opa.src_path_bits; 4447 return 0; 4448 } 4449 4450 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr, 4451 bool make_grd) 4452 { 4453 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4454 attr->opa.make_grd = make_grd; 4455 } 4456 4457 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr) 4458 { 4459 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4460 return attr->opa.make_grd; 4461 return false; 4462 } 4463 4464 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num) 4465 { 4466 attr->port_num = port_num; 4467 } 4468 4469 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) 4470 { 4471 return attr->port_num; 4472 } 4473 4474 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr, 4475 u8 static_rate) 4476 { 4477 attr->static_rate = static_rate; 4478 } 4479 4480 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr) 4481 { 4482 return attr->static_rate; 4483 } 4484 4485 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr, 4486 enum ib_ah_flags flag) 4487 { 4488 attr->ah_flags = flag; 4489 } 4490 4491 static inline enum ib_ah_flags 4492 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr) 4493 { 4494 return attr->ah_flags; 4495 } 4496 4497 static inline const struct ib_global_route 4498 *rdma_ah_read_grh(const struct rdma_ah_attr *attr) 4499 { 4500 return &attr->grh; 4501 } 4502 4503 /*To retrieve and modify the grh */ 4504 static inline struct ib_global_route 4505 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr) 4506 { 4507 return &attr->grh; 4508 } 4509 4510 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid) 4511 { 4512 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4513 4514 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid)); 4515 } 4516 4517 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr, 4518 __be64 prefix) 4519 { 4520 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4521 4522 grh->dgid.global.subnet_prefix = prefix; 4523 } 4524 4525 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr, 4526 __be64 if_id) 4527 { 4528 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4529 4530 grh->dgid.global.interface_id = if_id; 4531 } 4532 4533 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr, 4534 union ib_gid *dgid, u32 flow_label, 4535 u8 sgid_index, u8 hop_limit, 4536 u8 traffic_class) 4537 { 4538 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4539 4540 attr->ah_flags = IB_AH_GRH; 4541 if (dgid) 4542 grh->dgid = *dgid; 4543 grh->flow_label = flow_label; 4544 grh->sgid_index = sgid_index; 4545 grh->hop_limit = hop_limit; 4546 grh->traffic_class = traffic_class; 4547 grh->sgid_attr = NULL; 4548 } 4549 4550 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr); 4551 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid, 4552 u32 flow_label, u8 hop_limit, u8 traffic_class, 4553 const struct ib_gid_attr 
*sgid_attr);
4554 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4555 		       const struct rdma_ah_attr *src);
4556 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4557 			  const struct rdma_ah_attr *new);
4558 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4559
4560 /**
4561 * rdma_ah_find_type - Return address handle type.
4562 *
4563 * @dev: Device to be checked
4564 * @port_num: Port number
4565 */
4566 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4567 						       u8 port_num)
4568 {
4569 	if (rdma_protocol_roce(dev, port_num))
4570 		return RDMA_AH_ATTR_TYPE_ROCE;
4571 	if (rdma_protocol_ib(dev, port_num)) {
4572 		if (rdma_cap_opa_ah(dev, port_num))
4573 			return RDMA_AH_ATTR_TYPE_OPA;
4574 		return RDMA_AH_ATTR_TYPE_IB;
4575 	}
4576
4577 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4578 }
4579
4580 /**
4581 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4582 * In the current implementation the only way to get the 32bit lid
4583 * is from other sources for OPA.
4584 * For IB, lids will always be 16bits so cast the
4585 * value accordingly.
4586 *
4587 * @lid: A 32bit LID
4588 */
4589 static inline u16 ib_lid_cpu16(u32 lid)
4590 {
4591 	WARN_ON_ONCE(lid & 0xFFFF0000);
4592 	return (u16)lid;
4593 }
4594
4595 /**
4596 * ib_lid_be16 - Return lid in 16bit BE encoding.
4597 *
4598 * @lid: A 32bit LID
4599 */
4600 static inline __be16 ib_lid_be16(u32 lid)
4601 {
4602 	WARN_ON_ONCE(lid & 0xFFFF0000);
4603 	return cpu_to_be16((u16)lid);
4604 }
4605
4606 /**
4607 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4608 *   vector
4609 * @device: the rdma device
4610 * @comp_vector: index of completion vector
4611 *
4612 * Returns the cpu map of the given completion vector, or NULL if the
4613 * vector index is out of range or the device driver does not implement
4614 * get_vector_affinity.
4615 */
4616 static inline const struct cpumask *
4617 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4618 {
4619 	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4620 	    !device->ops.get_vector_affinity)
4621 		return NULL;
4622
4623 	return device->ops.get_vector_affinity(device, comp_vector);
4624
4625 }
4626
4627 /**
4628 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4629 * and add their gids, as needed, to the relevant RoCE devices.
4630 *
4631 * @ibdev: the rdma device
4632 */
4633 void rdma_roce_rescan_device(struct ib_device *ibdev);
4634
4635 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4636
4637 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4638
4639 struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4640 				     enum rdma_netdev_t type, const char *name,
4641 				     unsigned char name_assign_type,
4642 				     void (*setup)(struct net_device *));
4643
4644 int rdma_init_netdev(struct ib_device *device, u8 port_num,
4645 		     enum rdma_netdev_t type, const char *name,
4646 		     unsigned char name_assign_type,
4647 		     void (*setup)(struct net_device *),
4648 		     struct net_device *netdev);
4649
4650 /**
4651 * rdma_set_device_sysfs_group - Set device attributes group to have
4652 *   driver-specific sysfs entries for
4653 *   the infiniband class.
4654 *
4655 * @dev: device pointer for which the attributes are to be created
4656 * @group: Pointer to group which should be added when device
4657 *   is registered with sysfs.
4658 *
4659 * rdma_set_device_sysfs_group() allows existing drivers to expose one
sysfs attribute group per device.
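 *
 * A minimal sketch of how an existing driver might use this (the attribute
 * and group names below are illustrative assumptions, not part of this API):
 *
 *	static struct attribute *hca_attrs[] = {
 *		&dev_attr_hw_rev.attr,
 *		NULL
 *	};
 *	static const struct attribute_group hca_attr_group = {
 *		.attrs = hca_attrs,
 *	};
 *
 *	rdma_set_device_sysfs_group(ibdev, &hca_attr_group);
 *
 * followed by the usual device registration call.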
4660 * 4661 * NOTE: New drivers should not make use of this API; instead new device 4662 * parameter should be exposed via netlink command. This API and mechanism 4663 * exist only for existing drivers. 4664 */ 4665 static inline void 4666 rdma_set_device_sysfs_group(struct ib_device *dev, 4667 const struct attribute_group *group) 4668 { 4669 dev->groups[1] = group; 4670 } 4671 4672 /** 4673 * rdma_device_to_ibdev - Get ib_device pointer from device pointer 4674 * 4675 * @device: device pointer for which ib_device pointer to retrieve 4676 * 4677 * rdma_device_to_ibdev() retrieves ib_device pointer from device. 4678 * 4679 */ 4680 static inline struct ib_device *rdma_device_to_ibdev(struct device *device) 4681 { 4682 struct ib_core_device *coredev = 4683 container_of(device, struct ib_core_device, dev); 4684 4685 return coredev->owner; 4686 } 4687 4688 /** 4689 * rdma_device_to_drv_device - Helper macro to reach back to driver's 4690 * ib_device holder structure from device pointer. 4691 * 4692 * NOTE: New drivers should not make use of this API; This API is only for 4693 * existing drivers who have exposed sysfs entries using 4694 * rdma_set_device_sysfs_group(). 4695 */ 4696 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \ 4697 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member) 4698 4699 bool rdma_dev_access_netns(const struct ib_device *device, 4700 const struct net *net); 4701 #endif /* IB_VERBS_H */ 4702