/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG)
#define ibdev_dbg(__dev, format, args...) \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
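
/*
 * Example (illustrative sketch only, not part of this header's API): a driver
 * holding a struct ib_device can use the helpers above to emit device-prefixed
 * messages.  The function and variable names below are hypothetical.
 *
 *	static void my_drv_report_eq_overflow(struct ib_device *ibdev, int eqn)
 *	{
 *		ibdev_err(ibdev, "EQ %d overrun\n", eqn);
 *		ibdev_dbg(ibdev, "dumping EQ %d state for debug\n", eqn);
 *	}
 *
 * ibdev_dbg() only produces output when CONFIG_DYNAMIC_DEBUG enables the call
 * site; otherwise it compiles to an empty stub.
 */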

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG)
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT 4791

struct ib_gid_attr {
	struct net_device __rcu *ndev;
	struct ib_device *device;
	union ib_gid gid;
	enum ib_gid_type gid_type;
	u16 index;
	u8 port_num;
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};
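
/*
 * Example (illustrative sketch only): given a GID attribute handed out by the
 * core, a consumer can tell RoCE v1 from RoCE v2 entries and, for v2, whether
 * the GID encodes an IPv4-mapped or a native IPv6 address via
 * rdma_gid_attr_network_type().  The helper name below is hypothetical.
 *
 *	static bool my_gid_is_roce_v2(const struct ib_gid_attr *attr)
 *	{
 *		return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
 *	}
 */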

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	/* Not in use, former INIT_TYPE = (1 << 9),*/
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	/* Reserved, old SEND_W_INV = (1 << 16),*/
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with a single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
	IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;
};
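
/*
 * Example (illustrative sketch only): before relying on on-demand paging, a
 * ULP would typically test the relevant capability bits reported by the device
 * (see struct ib_device_attr further down).  The function name is hypothetical
 * and the attribute pointer is assumed to come from the device query path.
 *
 *	static bool my_ulp_can_use_odp(const struct ib_device_attr *attrs)
 *	{
 *		return (attrs->device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING) &&
 *		       (attrs->odp_caps.general_caps & IB_ODP_SUPPORT);
 *	}
 */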

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int cqe;
	u32 comp_vector;
	u32 flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64 length;
	u64 offset;
	u32 access_flags;
};

struct ib_dm_alloc_attr {
	u64 length;
	u32 alignment;
	u32 flags;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	u64 device_cap_flags;
	int max_send_sge;
	int max_recv_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	unsigned int max_pi_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock; /* in kHz */
	struct ib_rss_caps rss_caps;
	u32 max_wq_type_rq;
	u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps tm_caps;
	struct ib_cq_caps cq_caps;
	u64 max_dm_size;
	/* Max entries for sgl for optimized performance per READ */
	u32 max_sgl_rd;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
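
/*
 * Example (illustrative sketch only): the two helpers above convert between
 * the IB MTU encoding and a byte count.  Clamping a netdev MTU of 1500 against
 * a port whose max_mtu is IB_MTU_4096 yields IB_MTU_1024.  The function name
 * below is hypothetical.
 *
 *	static enum ib_mtu my_clamp_path_mtu(int netdev_mtu, enum ib_mtu max_mtu)
 *	{
 *		return min_t(int, ib_mtu_int_to_enum(netdev_mtu), max_mtu);
 *	}
 */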

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_2X = 16,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_2X:  return 2;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32,
	IB_SPEED_HDR = 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64-bit and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	struct mutex lock; /* Protect lifespan and values[] */
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
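
/*
 * Example (illustrative sketch only): a driver's alloc_hw_stats callback would
 * typically pair a static name table with rdma_alloc_hw_stats_struct(), keeping
 * the BUILD_BUG_ON() recommended above.  All names below are hypothetical.
 *
 *	static const char * const my_drv_stat_names[] = {
 *		"rx_pkts",
 *		"tx_pkts",
 *	};
 *
 *	static struct rdma_hw_stats *my_drv_alloc_hw_stats(struct ib_device *ibdev,
 *							   u8 port_num)
 *	{
 *		BUILD_BUG_ON(ARRAY_SIZE(my_drv_stat_names) != 2);
 *		return rdma_alloc_hw_stats_struct(my_drv_stat_names,
 *						  ARRAY_SIZE(my_drv_stat_names),
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */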

/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management				0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format			0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED	0x00008000

/* Protocol				0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB	(RDMA_CORE_CAP_PROT_IB \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_IB_SA \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE	(RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_AF_IB \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_AF_IB \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP	(RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA	(RDMA_CORE_PORT_IBA_IB \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64 subnet_prefix;
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	unsigned int ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u32 sm_lid;
	u32 lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
	u16 port_cap_flags2;
};
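
/*
 * Example (illustrative sketch only): the RDMA_CORE_PORT_* composites above are
 * what a driver reports from its get_port_immutable() callback so the core
 * knows which management/protocol services to run on a port (struct
 * ib_port_immutable appears further down in this header).  The function name is
 * hypothetical and IB_MGMT_MAD_SIZE comes from rdma/ib_mad.h.
 *
 *	static int my_drv_port_immutable(struct ib_device *ibdev, u8 port_num,
 *					 struct ib_port_immutable *immutable)
 *	{
 *		struct ib_port_attr attr;
 *		int err = ib_query_port(ibdev, port_num, &attr);
 *
 *		if (err)
 *			return err;
 *		immutable->pkey_tbl_len = attr.pkey_tbl_len;
 *		immutable->gid_tbl_len = attr.gid_tbl_len;
 *		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
 *		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 *		return 0;
 *	}
 */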

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3),
	IB_PORT_OPA_MASK_CHG = (1<<4)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		struct ib_wq *wq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
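
/*
 * Example (illustrative sketch only): a consumer watching for asynchronous port
 * events initializes a handler with the macro above and registers it with the
 * core (ib_register_event_handler() is declared elsewhere in this header).
 * The my_handler/my_watch_ports names are hypothetical.
 *
 *	static void my_handler(struct ib_event_handler *h, struct ib_event *ev)
 *	{
 *		if (ev->event == IB_EVENT_PORT_ERR)
 *			ibdev_warn(ev->device, "port %u went down: %s\n",
 *				   ev->element.port_num,
 *				   ib_event_msg(ev->event));
 *	}
 *
 *	static void my_watch_ports(struct ib_device *ibdev,
 *				   struct ib_event_handler *h)
 *	{
 *		INIT_IB_EVENT_HANDLER(h, ibdev, my_handler);
 *		ib_register_event_handler(h);
 *	}
 */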

struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8 reserved[20];
		struct iphdr roce4grh;
	};
};

#define IB_QPN_MASK	0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS = 19,
	IB_RATE_50_GBPS = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG: memory region that is used for
 *   normal registration
 * @IB_MR_TYPE_SG_GAPS: memory region that is capable of registering
 *   any arbitrary sg lists (without the normal mr constraints - see
 *   ib_map_mr_sg)
 * @IB_MR_TYPE_DM: memory region that is used for device
 *   memory registration
 * @IB_MR_TYPE_USER: memory region that is used for the user-space
 *   application
 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations
 *   without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY: memory region that is used for
 *   data integrity operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *   failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *   failure.
 */
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16 dlid;
	u8 src_path_bits;
};

struct roce_ah_attr {
	u8 dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32 dlid;
	u8 src_path_bits;
	bool make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route grh;
	u8 sl;
	u8 static_rate;
	u8 port_num;
	u8 ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};
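
/*
 * Example (illustrative sketch only): a kernel ULP building an address handle
 * for a RoCE destination fills struct rdma_ah_attr directly; a GRH is mandatory
 * on Ethernet link layers.  All values below are placeholders, and the
 * remote_gid/sgid_index are assumed to come from path resolution.
 *
 *	static void my_fill_roce_ah(struct rdma_ah_attr *ah, u8 port_num,
 *				    const union ib_gid *remote_gid, u8 sgid_index)
 *	{
 *		memset(ah, 0, sizeof(*ah));
 *		ah->type = RDMA_AH_ATTR_TYPE_ROCE;
 *		ah->port_num = port_num;
 *		ah->ah_flags = IB_AH_GRH;
 *		ah->grh.dgid = *remote_gid;
 *		ah->grh.sgid_index = sgid_index;
 *		ah->grh.hop_limit = 0xff;
 *	}
 */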

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
	IB_WC_WITH_SMAC = (1<<4),
	IB_WC_WITH_VLAN = (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	u32 slid;
	int wc_flags;
	u16 pkey_index;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num; /* valid only for DR SMPs on switches */
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};
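
/*
 * Example (illustrative sketch only): kernel consumers usually embed a struct
 * ib_cqe (defined further down) in their per-request context and recover it
 * from wc->wr_cqe in the completion callback; ib_wc_status_msg() turns an error
 * status into readable text.  The my_request type and names are hypothetical.
 *
 *	struct my_request {
 *		struct ib_cqe cqe;
 *	};
 *
 *	static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *
 *		if (wc->status != IB_WC_SUCCESS)
 *			ibdev_err_ratelimited(cq->device,
 *					      "send failed: %s (vendor 0x%x)\n",
 *					      ib_wc_status_msg(wc->status),
 *					      wc->vendor_err);
 *	}
 */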

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32 max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right number of needed WRs
	 * and MRs based on this.
	 */
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = 0xFF,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV = 1 << 4,
	IB_QP_CREATE_NETIF_QP = 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
	/* FREE				= 1 << 7, */
	IB_QP_CREATE_SCATTER_FCS = 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
	IB_QP_CREATE_SOURCE_QPN = 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START = 1 << 26,
	IB_QP_CREATE_RESERVED_END = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	/* Consumer's event_handler callback must not block */
	void (*event_handler)(struct ib_event *, void *);

	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	u32 create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8 port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32 source_qpn;
};
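
/*
 * Example (illustrative sketch only): a kernel ULP creating an RC QP fills
 * struct ib_qp_init_attr and calls ib_create_qp() (declared later in this
 * header).  The sizes below are arbitrary placeholders.
 *
 *	static struct ib_qp *my_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
 *	{
 *		struct ib_qp_init_attr init_attr = {
 *			.send_cq = cq,
 *			.recv_cq = cq,
 *			.qp_type = IB_QPT_RC,
 *			.sq_sig_type = IB_SIGNAL_REQ_WR,
 *			.cap = {
 *				.max_send_wr = 128,
 *				.max_recv_wr = 128,
 *				.max_send_sge = 2,
 *				.max_recv_sge = 2,
 *			},
 *		};
 *
 *		return ib_create_qp(pd, &init_attr);
 *	}
 */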

struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20),
	IB_QP_RESERVED1 = (1<<21),
	IB_QP_RESERVED2 = (1<<22),
	IB_QP_RESERVED3 = (1<<23),
	IB_QP_RESERVED4 = (1<<24),
	IB_QP_RATE_LIMIT = (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct rdma_ah_attr ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
	u32 rate_limit;
};
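
/*
 * Example (illustrative sketch only): driving an RC QP from INIT to RTR uses
 * struct ib_qp_attr together with an attribute mask listing exactly the fields
 * being set; ib_modify_qp() is declared later in this header.  The values and
 * the dest_qpn/rq_psn/ah parameters are placeholders from connection setup.
 *
 *	static int my_qp_to_rtr(struct ib_qp *qp, const struct rdma_ah_attr *ah,
 *				u32 dest_qpn, u32 rq_psn)
 *	{
 *		struct ib_qp_attr attr = {
 *			.qp_state = IB_QPS_RTR,
 *			.path_mtu = IB_MTU_1024,
 *			.ah_attr = *ah,
 *			.dest_qp_num = dest_qpn,
 *			.rq_psn = rq_psn,
 *			.max_dest_rd_atomic = 1,
 *			.min_rnr_timer = IB_RNR_TIMER_000_12,
 *		};
 *
 *		return ib_modify_qp(qp, &attr,
 *				    IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 *				    IB_QP_DEST_QPN | IB_QP_RQ_PSN |
 *				    IB_QP_MAX_DEST_RD_ATOMIC |
 *				    IB_QP_MIN_RNR_TIMER);
 *	}
 */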

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3),
	IB_SEND_IP_CSUM = (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START = (1 << 26),
	IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u32 rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u64 compare_add;
	u64 swap;
	u64 compare_add_mask;
	u64 swap_mask;
	u32 rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr wr;
	struct ib_ah *ah;
	void *header;
	int hlen;
	int mss;
	u32 remote_qpn;
	u32 remote_qkey;
	u16 pkey_index; /* valid for GSI only */
	u8 port_num; /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr wr;
	struct ib_mr *mr;
	u32 key;
	int access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
};
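
/*
 * Example (illustrative sketch only): posting a signalled RDMA WRITE wraps an
 * ib_sge and an ib_rdma_wr and hands them to ib_post_send() (declared later in
 * this header).  The DMA address, keys and completion cqe are assumed to come
 * from the caller's registration and CQ setup; names are hypothetical.
 *
 *	static int my_post_rdma_write(struct ib_qp *qp, struct ib_cqe *cqe,
 *				      u64 local_dma_addr, u32 lkey, u32 len,
 *				      u64 remote_addr, u32 rkey)
 *	{
 *		struct ib_sge sge = {
 *			.addr = local_dma_addr,
 *			.length = len,
 *			.lkey = lkey,
 *		};
 *		struct ib_rdma_wr wr = {
 *			.wr = {
 *				.wr_cqe = cqe,
 *				.sg_list = &sge,
 *				.num_sge = 1,
 *				.opcode = IB_WR_RDMA_WRITE,
 *				.send_flags = IB_SEND_SIGNALED,
 *			},
 *			.remote_addr = remote_addr,
 *			.rkey = rkey,
 *		};
 *
 *		return ib_post_send(qp, &wr.wr, NULL);
 *	}
 */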

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,

	IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS = 1,
	IB_MR_REREG_PD = (1<<1),
	IB_MR_REREG_ACCESS = (1<<2),
	IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup *cg; /* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device *device;
	struct ib_uverbs_file *ufile;
	/*
	 * 'closing' can be read by the driver only during a destroy callback,
	 * it is set when we are closing the file descriptor and indicates
	 * that mm_sem may be locked.
	 */
	bool closing;

	bool cleanup_retryable;

	struct ib_rdmacg_object cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

struct ib_uobject {
	u64 user_handle; /* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext *context; /* associated user context */
	void *object; /* containing object */
	struct list_head list; /* link to context's list */
	struct ib_rdmacg_object cg_obj; /* rdmacg object */
	int id; /* index into kernel idr */
	struct kref ref;
	atomic_t usecnt; /* protects exclusive access */
	struct rcu_head rcu; /* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	u32 local_dma_lkey;
	u32 flags;
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt; /* count all resources */

	u32 unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt; /* count all exposed resources */
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,		   /* caller context, no hw completions */
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};
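
/*
 * Example (illustrative sketch only): kernel ULPs normally let the core own CQ
 * processing by picking one of the polling contexts above when allocating a CQ
 * with ib_alloc_cq() (provided further down in this header).  IB_POLL_SOFTIRQ
 * favours latency, IB_POLL_WORKQUEUE keeps completion work in process context.
 * The function name is hypothetical.
 *
 *	static struct ib_cq *my_alloc_send_cq(struct ib_device *ibdev, void *ctx)
 *	{
 *		return ib_alloc_cq(ibdev, ctx, 256, 0, IB_POLL_SOFTIRQ);
 *	}
 */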

struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt; /* count number of work queues */
	enum ib_poll_context poll_ctx;
	struct ib_wc *wc;
	union {
		struct irq_poll iop;
		struct work_struct work;
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32 srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	/* Stripping the CVLAN tag from an incoming packet and reporting it in
	 * the matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	void *wq_context;
	void (*event_handler)(struct ib_event *, void *);
	struct ib_pd *pd;
	struct ib_cq *cq;
	u32 wq_num;
	enum ib_wq_state state;
	enum ib_wq_type wq_type;
	atomic_t usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

struct ib_wq_init_attr {
	void *wq_context;
	enum ib_wq_type wq_type;
	u32 max_wr;
	u32 max_sge;
	struct ib_cq *cq;
	void (*event_handler)(struct ib_event *, void *);
	u32 create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE = 1 << 0,
	IB_WQ_CUR_STATE = 1 << 1,
	IB_WQ_FLAGS = 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state wq_state;
	enum ib_wq_state curr_wq_state;
	u32 flags; /* Use enum ib_wq_flags */
	u32 flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;
	u32 ind_tbl_num;
	u32 log_ind_tbl_size;
	struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32 log_ind_tbl_size;
	/* Each entry is a pointer to a Receive Work Queue */
	struct ib_wq **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state state;
	u16 pkey_index;
	u8 port_num;
	struct list_head qp_list;
	struct list_head to_error_list;
	struct ib_qp_security *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey main;
	struct ib_port_pkey alt;
};

struct ib_qp_security {
	struct ib_qp *qp;
	struct ib_device *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex mutex;
	struct ib_ports_pkeys *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head shared_qp_list;
	void *security;
	bool destroying;
	atomic_t error_list_count;
	struct completion error_complete;
	int error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct list_head xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t usecnt;
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32 qp_num;
	u32 max_write_sge;
	u32 max_read_sge;
	enum ib_qp_type qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security *qp_sec;
	u8 port;

	bool integrity_en;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;

	/* The counter the qp is bound to */
	struct rdma_counter *counter;
};

struct ib_dm {
	struct ib_device *device;
	u32 length;
	u32 flags;
	struct ib_uobject *uobject;
	atomic_t usecnt;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	u32 lkey;
	u32 rkey;
	u64 iova;
	u64 length;
	unsigned int page_size;
	enum ib_mr_type type;
	bool need_inval;
	union {
		struct ib_uobject *uobject; /* user */
		struct list_head qp_entry;  /* FR */
	};

	struct ib_dm *dm;
	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
	enum ib_mw_type type;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL = 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT = 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER = 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH = 0x20,
	IB_FLOW_SPEC_IB = 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4 = 0x30,
	IB_FLOW_SPEC_IPV6 = 0x31,
	IB_FLOW_SPEC_ESP = 0x34,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP = 0x40,
	IB_FLOW_SPEC_UDP = 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
	IB_FLOW_SPEC_GRE = 0x51,
	IB_FLOW_SPEC_MPLS = 0x60,
	IB_FLOW_SPEC_INNER = 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP = 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK		0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS	10

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
};

struct ib_flow_eth_filter {
	u8 dst_mac[6];
	u8 src_mac[6];
	__be16 ether_type;
	__be16 vlan_tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_eth {
	u32 type;
	u16 size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8 sl;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ib {
	u32 type;
	u16 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* All fragments except the last one
				    have this flag set */
};

struct ib_flow_ipv4_filter {
	__be32 src_ip;
	__be32 dst_ip;
	u8 proto;
	u8 tos;
	u8 ttl;
	u8 flags;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32 type;
	u16 size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8 src_ip[16];
	u8 dst_ip[16];
	__be32 flow_label;
	u8 next_hdr;
	u8 traffic_class;
	u8 hop_limit;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32 type;
	u16 size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16 dst_port;
	__be16 src_port;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32 type;
	u16 size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32 tunnel_id;
	u8 real_sz[0];
};

/* ib_flow_spec_tunnel describes the VXLAN tunnel;
 * the tunnel_id in val holds the VNI value.
 */
struct ib_flow_spec_tunnel {
	u32 type;
	u16 size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	__be32 spi;
	__be32 seq;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_esp {
	u32 type;
	u16 size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;
	__be16 protocol;
	__be32 key;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_gre {
	u32 type;
	u16 size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	__be32 tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_mpls {
	u32 type;
	u16 size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type type;
	u16 size;
	u32 tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type type;
	u16 size;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_flow_action *act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type type;
	u16 size;
	struct ib_counters *counters;
};

union ib_flow_spec {
	struct {
		u32 type;
		u16 size;
	};
	struct ib_flow_spec_eth eth;
	struct ib_flow_spec_ib ib;
	struct ib_flow_spec_ipv4 ipv4;
	struct ib_flow_spec_tcp_udp tcp_udp;
	struct ib_flow_spec_ipv6 ipv6;
	struct ib_flow_spec_tunnel tunnel;
	struct ib_flow_spec_esp esp;
	struct ib_flow_spec_gre gre;
	struct ib_flow_spec_mpls mpls;
	struct ib_flow_spec_action_tag flow_tag;
	struct ib_flow_spec_action_drop drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16 size;
	u16 priority;
	u32 flags;
	u8 num_of_specs;
	u8 port;
	union ib_flow_spec flows[];
};

struct ib_flow {
	struct ib_qp *qp;
	struct ib_device *device;
	struct ib_uobject *uobject;
};

enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
	} keymat;
};

struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp bmp;
	} replay;
};

enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
	 * This is done in order to share the same flags between user-space and
	 * kernel and spare an unnecessary translation.
2091 */ 2092 2093 /* Kernel flags */ 2094 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, 2095 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, 2096 }; 2097 2098 struct ib_flow_spec_list { 2099 struct ib_flow_spec_list *next; 2100 union ib_flow_spec spec; 2101 }; 2102 2103 struct ib_flow_action_attrs_esp { 2104 struct ib_flow_action_attrs_esp_keymats *keymat; 2105 struct ib_flow_action_attrs_esp_replays *replay; 2106 struct ib_flow_spec_list *encap; 2107 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. 2108 * Value of 0 is a valid value. 2109 */ 2110 u32 esn; 2111 u32 spi; 2112 u32 seq; 2113 u32 tfc_pad; 2114 /* Use enum ib_flow_action_attrs_esp_flags */ 2115 u64 flags; 2116 u64 hard_limit_pkts; 2117 }; 2118 2119 struct ib_flow_action { 2120 struct ib_device *device; 2121 struct ib_uobject *uobject; 2122 enum ib_flow_action_type type; 2123 atomic_t usecnt; 2124 }; 2125 2126 struct ib_mad; 2127 struct ib_grh; 2128 2129 enum ib_process_mad_flags { 2130 IB_MAD_IGNORE_MKEY = 1, 2131 IB_MAD_IGNORE_BKEY = 2, 2132 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 2133 }; 2134 2135 enum ib_mad_result { 2136 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 2137 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 2138 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 2139 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 2140 }; 2141 2142 struct ib_port_cache { 2143 u64 subnet_prefix; 2144 struct ib_pkey_cache *pkey; 2145 struct ib_gid_table *gid; 2146 u8 lmc; 2147 enum ib_port_state port_state; 2148 }; 2149 2150 struct ib_cache { 2151 rwlock_t lock; 2152 struct ib_event_handler event_handler; 2153 }; 2154 2155 struct ib_port_immutable { 2156 int pkey_tbl_len; 2157 int gid_tbl_len; 2158 u32 core_cap_flags; 2159 u32 max_mad_size; 2160 }; 2161 2162 struct ib_port_data { 2163 struct ib_device *ib_dev; 2164 2165 struct ib_port_immutable immutable; 2166 2167 spinlock_t pkey_list_lock; 2168 struct list_head pkey_list; 2169 2170 struct ib_port_cache cache; 2171 2172 spinlock_t netdev_lock; 2173 struct net_device __rcu *netdev; 2174 struct hlist_node ndev_hash_link; 2175 struct rdma_port_counter port_counter; 2176 struct rdma_hw_stats *hw_stats; 2177 }; 2178 2179 /* rdma netdev type - specifies protocol type */ 2180 enum rdma_netdev_t { 2181 RDMA_NETDEV_OPA_VNIC, 2182 RDMA_NETDEV_IPOIB, 2183 }; 2184 2185 /** 2186 * struct rdma_netdev - rdma netdev 2187 * For cases where netstack interfacing is required. 2188 */ 2189 struct rdma_netdev { 2190 void *clnt_priv; 2191 struct ib_device *hca; 2192 u8 port_num; 2193 2194 /* 2195 * cleanup function must be specified. 2196 * FIXME: This is only used for OPA_VNIC and that usage should be 2197 * removed too. 
2198 */ 2199 void (*free_rdma_netdev)(struct net_device *netdev); 2200 2201 /* control functions */ 2202 void (*set_id)(struct net_device *netdev, int id); 2203 /* send packet */ 2204 int (*send)(struct net_device *dev, struct sk_buff *skb, 2205 struct ib_ah *address, u32 dqpn); 2206 /* multicast */ 2207 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca, 2208 union ib_gid *gid, u16 mlid, 2209 int set_qkey, u32 qkey); 2210 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca, 2211 union ib_gid *gid, u16 mlid); 2212 }; 2213 2214 struct rdma_netdev_alloc_params { 2215 size_t sizeof_priv; 2216 unsigned int txqs; 2217 unsigned int rxqs; 2218 void *param; 2219 2220 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num, 2221 struct net_device *netdev, void *param); 2222 }; 2223 2224 struct ib_odp_counters { 2225 atomic64_t faults; 2226 atomic64_t invalidations; 2227 }; 2228 2229 struct ib_counters { 2230 struct ib_device *device; 2231 struct ib_uobject *uobject; 2232 /* num of objects attached */ 2233 atomic_t usecnt; 2234 }; 2235 2236 struct ib_counters_read_attr { 2237 u64 *counters_buff; 2238 u32 ncounters; 2239 u32 flags; /* use enum ib_read_counters_flags */ 2240 }; 2241 2242 struct uverbs_attr_bundle; 2243 struct iw_cm_id; 2244 struct iw_cm_conn_param; 2245 2246 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \ 2247 .size_##ib_struct = \ 2248 (sizeof(struct drv_struct) + \ 2249 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \ 2250 BUILD_BUG_ON_ZERO( \ 2251 !__same_type(((struct drv_struct *)NULL)->member, \ 2252 struct ib_struct))) 2253 2254 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ 2255 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp)) 2256 2257 #define rdma_zalloc_drv_obj(ib_dev, ib_type) \ 2258 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL) 2259 2260 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct 2261 2262 struct rdma_user_mmap_entry { 2263 struct kref ref; 2264 struct ib_ucontext *ucontext; 2265 unsigned long start_pgoff; 2266 size_t npages; 2267 bool driver_removed; 2268 }; 2269 2270 /* Return the offset (in bytes) the user should pass to libc's mmap() */ 2271 static inline u64 2272 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry) 2273 { 2274 return (u64)entry->start_pgoff << PAGE_SHIFT; 2275 } 2276 2277 /** 2278 * struct ib_device_ops - InfiniBand device operations 2279 * This structure defines all the InfiniBand device operations, providers will 2280 * need to define the supported operations, otherwise they will be set to null. 
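 *
 * As an illustrative sketch only (my_drv_pd, my_alloc_pd and my_dev_ops are
 * hypothetical names, and struct my_drv_pd is assumed to embed a struct ib_pd
 * member named ibpd at offset zero), a provider typically fills in only the
 * callbacks it supports and registers them with ib_set_device_ops():
 *
 *	static int my_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 *	{
 *		return 0;	// core allocated size_ib_pd bytes for us
 *	}
 *
 *	static const struct ib_device_ops my_dev_ops = {
 *		.owner = THIS_MODULE,
 *		.alloc_pd = my_alloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_drv_pd, ibpd),
 *	};
 *
 *	// before ib_register_device():
 *	ib_set_device_ops(ibdev, &my_dev_ops);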
2281 */ 2282 struct ib_device_ops { 2283 struct module *owner; 2284 enum rdma_driver_id driver_id; 2285 u32 uverbs_abi_ver; 2286 unsigned int uverbs_no_driver_id_binding:1; 2287 2288 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr, 2289 const struct ib_send_wr **bad_send_wr); 2290 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, 2291 const struct ib_recv_wr **bad_recv_wr); 2292 void (*drain_rq)(struct ib_qp *qp); 2293 void (*drain_sq)(struct ib_qp *qp); 2294 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc); 2295 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 2296 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags); 2297 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt); 2298 int (*post_srq_recv)(struct ib_srq *srq, 2299 const struct ib_recv_wr *recv_wr, 2300 const struct ib_recv_wr **bad_recv_wr); 2301 int (*process_mad)(struct ib_device *device, int process_mad_flags, 2302 u8 port_num, const struct ib_wc *in_wc, 2303 const struct ib_grh *in_grh, 2304 const struct ib_mad *in_mad, struct ib_mad *out_mad, 2305 size_t *out_mad_size, u16 *out_mad_pkey_index); 2306 int (*query_device)(struct ib_device *device, 2307 struct ib_device_attr *device_attr, 2308 struct ib_udata *udata); 2309 int (*modify_device)(struct ib_device *device, int device_modify_mask, 2310 struct ib_device_modify *device_modify); 2311 void (*get_dev_fw_str)(struct ib_device *device, char *str); 2312 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, 2313 int comp_vector); 2314 int (*query_port)(struct ib_device *device, u8 port_num, 2315 struct ib_port_attr *port_attr); 2316 int (*modify_port)(struct ib_device *device, u8 port_num, 2317 int port_modify_mask, 2318 struct ib_port_modify *port_modify); 2319 /** 2320 * The following mandatory functions are used only at device 2321 * registration. Keep functions such as these at the end of this 2322 * structure to avoid cache line misses when accessing struct ib_device 2323 * in fast paths. 2324 */ 2325 int (*get_port_immutable)(struct ib_device *device, u8 port_num, 2326 struct ib_port_immutable *immutable); 2327 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 2328 u8 port_num); 2329 /** 2330 * When calling get_netdev, the HW vendor's driver should return the 2331 * net device of device @device at port @port_num or NULL if such 2332 * a net device doesn't exist. The vendor driver should call dev_hold 2333 * on this net device. The HW vendor's device driver must guarantee 2334 * that this function returns NULL before the net device has finished 2335 * NETDEV_UNREGISTER state. 2336 */ 2337 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num); 2338 /** 2339 * rdma netdev operation 2340 * 2341 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params 2342 * must return -EOPNOTSUPP if it doesn't support the specified type. 2343 */ 2344 struct net_device *(*alloc_rdma_netdev)( 2345 struct ib_device *device, u8 port_num, enum rdma_netdev_t type, 2346 const char *name, unsigned char name_assign_type, 2347 void (*setup)(struct net_device *)); 2348 2349 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num, 2350 enum rdma_netdev_t type, 2351 struct rdma_netdev_alloc_params *params); 2352 /** 2353 * query_gid should be return GID value for @device, when @port_num 2354 * link layer is either IB or iWarp. It is no-op if @port_num port 2355 * is RoCE link layer. 
2356 */ 2357 int (*query_gid)(struct ib_device *device, u8 port_num, int index, 2358 union ib_gid *gid); 2359 /** 2360 * When calling add_gid, the HW vendor's driver should add the gid 2361 * of device of port at gid index available at @attr. Meta-info of 2362 * that gid (for example, the network device related to this gid) is 2363 * available at @attr. @context allows the HW vendor driver to store 2364 * extra information together with a GID entry. The HW vendor driver may 2365 * allocate memory to contain this information and store it in @context 2366 * when a new GID entry is written to. Params are consistent until the 2367 * next call of add_gid or delete_gid. The function should return 0 on 2368 * success or error otherwise. The function could be called 2369 * concurrently for different ports. This function is only called when 2370 * roce_gid_table is used. 2371 */ 2372 int (*add_gid)(const struct ib_gid_attr *attr, void **context); 2373 /** 2374 * When calling del_gid, the HW vendor's driver should delete the 2375 * gid of device @device at gid index gid_index of port port_num 2376 * available in @attr. 2377 * Upon the deletion of a GID entry, the HW vendor must free any 2378 * allocated memory. The caller will clear @context afterwards. 2379 * This function is only called when roce_gid_table is used. 2380 */ 2381 int (*del_gid)(const struct ib_gid_attr *attr, void **context); 2382 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index, 2383 u16 *pkey); 2384 int (*alloc_ucontext)(struct ib_ucontext *context, 2385 struct ib_udata *udata); 2386 void (*dealloc_ucontext)(struct ib_ucontext *context); 2387 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); 2388 /** 2389 * This will be called once refcount of an entry in mmap_xa reaches 2390 * zero. The type of the memory that was mapped may differ between 2391 * entries and is opaque to the rdma_user_mmap interface. 2392 * Therefore needs to be implemented by the driver in mmap_free. 
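 *
 * Illustrative sketch only (my_mmap and pfn_of_entry are hypothetical; the
 * pfn would normally come from the driver structure that embeds the entry):
 *
 *	static int my_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
 *	{
 *		struct rdma_user_mmap_entry *entry;
 *		int ret;
 *
 *		entry = rdma_user_mmap_entry_get(ctx, vma);
 *		if (!entry)
 *			return -EINVAL;
 *		ret = rdma_user_mmap_io(ctx, vma, pfn_of_entry(entry),
 *					vma->vm_end - vma->vm_start,
 *					vma->vm_page_prot, entry);
 *		rdma_user_mmap_entry_put(entry);
 *		return ret;
 *	}
 *
 * mmap_free() then releases whatever the driver allocated around the entry.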
2393 */ 2394 void (*mmap_free)(struct rdma_user_mmap_entry *entry); 2395 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2396 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); 2397 void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); 2398 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, 2399 u32 flags, struct ib_udata *udata); 2400 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2401 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2402 void (*destroy_ah)(struct ib_ah *ah, u32 flags); 2403 int (*create_srq)(struct ib_srq *srq, 2404 struct ib_srq_init_attr *srq_init_attr, 2405 struct ib_udata *udata); 2406 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr, 2407 enum ib_srq_attr_mask srq_attr_mask, 2408 struct ib_udata *udata); 2409 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); 2410 void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); 2411 struct ib_qp *(*create_qp)(struct ib_pd *pd, 2412 struct ib_qp_init_attr *qp_init_attr, 2413 struct ib_udata *udata); 2414 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2415 int qp_attr_mask, struct ib_udata *udata); 2416 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2417 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); 2418 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); 2419 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, 2420 struct ib_udata *udata); 2421 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); 2422 void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); 2423 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); 2424 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); 2425 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, 2426 u64 virt_addr, int mr_access_flags, 2427 struct ib_udata *udata); 2428 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length, 2429 u64 virt_addr, int mr_access_flags, 2430 struct ib_pd *pd, struct ib_udata *udata); 2431 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata); 2432 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, 2433 u32 max_num_sg, struct ib_udata *udata); 2434 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd, 2435 u32 max_num_data_sg, 2436 u32 max_num_meta_sg); 2437 int (*advise_mr)(struct ib_pd *pd, 2438 enum ib_uverbs_advise_mr_advice advice, u32 flags, 2439 struct ib_sge *sg_list, u32 num_sge, 2440 struct uverbs_attr_bundle *attrs); 2441 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 2442 unsigned int *sg_offset); 2443 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2444 struct ib_mr_status *mr_status); 2445 struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type, 2446 struct ib_udata *udata); 2447 int (*dealloc_mw)(struct ib_mw *mw); 2448 struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags, 2449 struct ib_fmr_attr *fmr_attr); 2450 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len, 2451 u64 iova); 2452 int (*unmap_fmr)(struct list_head *fmr_list); 2453 int (*dealloc_fmr)(struct ib_fmr *fmr); 2454 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2455 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2456 struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device, 2457 struct ib_udata *udata); 2458 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); 2459 struct 
ib_flow *(*create_flow)(struct ib_qp *qp, 2460 struct ib_flow_attr *flow_attr, 2461 int domain, struct ib_udata *udata); 2462 int (*destroy_flow)(struct ib_flow *flow_id); 2463 struct ib_flow_action *(*create_flow_action_esp)( 2464 struct ib_device *device, 2465 const struct ib_flow_action_attrs_esp *attr, 2466 struct uverbs_attr_bundle *attrs); 2467 int (*destroy_flow_action)(struct ib_flow_action *action); 2468 int (*modify_flow_action_esp)( 2469 struct ib_flow_action *action, 2470 const struct ib_flow_action_attrs_esp *attr, 2471 struct uverbs_attr_bundle *attrs); 2472 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2473 int state); 2474 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2475 struct ifla_vf_info *ivf); 2476 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2477 struct ifla_vf_stats *stats); 2478 int (*get_vf_guid)(struct ib_device *device, int vf, u8 port, 2479 struct ifla_vf_guid *node_guid, 2480 struct ifla_vf_guid *port_guid); 2481 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2482 int type); 2483 struct ib_wq *(*create_wq)(struct ib_pd *pd, 2484 struct ib_wq_init_attr *init_attr, 2485 struct ib_udata *udata); 2486 void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); 2487 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, 2488 u32 wq_attr_mask, struct ib_udata *udata); 2489 struct ib_rwq_ind_table *(*create_rwq_ind_table)( 2490 struct ib_device *device, 2491 struct ib_rwq_ind_table_init_attr *init_attr, 2492 struct ib_udata *udata); 2493 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2494 struct ib_dm *(*alloc_dm)(struct ib_device *device, 2495 struct ib_ucontext *context, 2496 struct ib_dm_alloc_attr *attr, 2497 struct uverbs_attr_bundle *attrs); 2498 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs); 2499 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, 2500 struct ib_dm_mr_attr *attr, 2501 struct uverbs_attr_bundle *attrs); 2502 struct ib_counters *(*create_counters)( 2503 struct ib_device *device, struct uverbs_attr_bundle *attrs); 2504 int (*destroy_counters)(struct ib_counters *counters); 2505 int (*read_counters)(struct ib_counters *counters, 2506 struct ib_counters_read_attr *counters_read_attr, 2507 struct uverbs_attr_bundle *attrs); 2508 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg, 2509 int data_sg_nents, unsigned int *data_sg_offset, 2510 struct scatterlist *meta_sg, int meta_sg_nents, 2511 unsigned int *meta_sg_offset); 2512 2513 /** 2514 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 2515 * driver initialized data. The struct is kfree()'ed by the sysfs 2516 * core when the device is removed. A lifespan of -1 in the return 2517 * struct tells the core to set a default lifespan. 2518 */ 2519 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 2520 u8 port_num); 2521 /** 2522 * get_hw_stats - Fill in the counter value(s) in the stats struct. 
2523 * @index - The index in the value array we wish to have updated, or 2524 * num_counters if we want all stats updated 2525 * Return codes - 2526 * < 0 - Error, no counters updated 2527 * index - Updated the single counter pointed to by index 2528 * num_counters - Updated all counters (will reset the timestamp 2529 * and prevent further calls for lifespan milliseconds) 2530 * Drivers are allowed to update all counters in lieu of just the 2531 * one given in index at their option 2532 */ 2533 int (*get_hw_stats)(struct ib_device *device, 2534 struct rdma_hw_stats *stats, u8 port, int index); 2535 /* 2536 * This function is called once for each port when an ib device is 2537 * registered. 2538 */ 2539 int (*init_port)(struct ib_device *device, u8 port_num, 2540 struct kobject *port_sysfs); 2541 /** 2542 * Allows rdma drivers to add their own restrack attributes. 2543 */ 2544 int (*fill_res_entry)(struct sk_buff *msg, 2545 struct rdma_restrack_entry *entry); 2546 2547 /* Device lifecycle callbacks */ 2548 /* 2549 * Called after the device becomes registered, before clients are 2550 * attached 2551 */ 2552 int (*enable_driver)(struct ib_device *dev); 2553 /* 2554 * This is called as part of ib_dealloc_device(). 2555 */ 2556 void (*dealloc_driver)(struct ib_device *dev); 2557 2558 /* iWarp CM callbacks */ 2559 void (*iw_add_ref)(struct ib_qp *qp); 2560 void (*iw_rem_ref)(struct ib_qp *qp); 2561 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn); 2562 int (*iw_connect)(struct iw_cm_id *cm_id, 2563 struct iw_cm_conn_param *conn_param); 2564 int (*iw_accept)(struct iw_cm_id *cm_id, 2565 struct iw_cm_conn_param *conn_param); 2566 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata, 2567 u8 pdata_len); 2568 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog); 2569 int (*iw_destroy_listen)(struct iw_cm_id *cm_id); 2570 /** 2571 * counter_bind_qp - Bind a QP to a counter. 2572 * @counter - The counter to be bound. If counter->id is zero then 2573 * the driver needs to allocate a new counter and set counter->id 2574 */ 2575 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp); 2576 /** 2577 * counter_unbind_qp - Unbind the qp from the dynamically-allocated 2578 * counter and bind it onto the default one 2579 */ 2580 int (*counter_unbind_qp)(struct ib_qp *qp); 2581 /** 2582 * counter_dealloc - De-allocate the hw counter 2583 */ 2584 int (*counter_dealloc)(struct rdma_counter *counter); 2585 /** 2586 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in 2587 * the driver initialized data. 2588 */ 2589 struct rdma_hw_stats *(*counter_alloc_stats)( 2590 struct rdma_counter *counter); 2591 /** 2592 * counter_update_stats - Query the stats value of this counter 2593 */ 2594 int (*counter_update_stats)(struct rdma_counter *counter); 2595 2596 /** 2597 * Allows rdma drivers to add their own restrack attributes 2598 * dumped via 'rdma stat' iproute2 command. 2599 */ 2600 int (*fill_stat_entry)(struct sk_buff *msg, 2601 struct rdma_restrack_entry *entry); 2602 2603 DECLARE_RDMA_OBJ_SIZE(ib_ah); 2604 DECLARE_RDMA_OBJ_SIZE(ib_cq); 2605 DECLARE_RDMA_OBJ_SIZE(ib_pd); 2606 DECLARE_RDMA_OBJ_SIZE(ib_srq); 2607 DECLARE_RDMA_OBJ_SIZE(ib_ucontext); 2608 }; 2609 2610 struct ib_core_device { 2611 /* device must be the first element in the structure for as long as 2612 * the union of ib_core_device and device exists in ib_device.
2613 */ 2614 struct device dev; 2615 possible_net_t rdma_net; 2616 struct kobject *ports_kobj; 2617 struct list_head port_list; 2618 struct ib_device *owner; /* reach back to owner ib_device */ 2619 }; 2620 2621 struct rdma_restrack_root; 2622 struct ib_device { 2623 /* Do not access @dma_device directly from ULP nor from HW drivers. */ 2624 struct device *dma_device; 2625 struct ib_device_ops ops; 2626 char name[IB_DEVICE_NAME_MAX]; 2627 struct rcu_head rcu_head; 2628 2629 struct list_head event_handler_list; 2630 spinlock_t event_handler_lock; 2631 2632 struct rw_semaphore client_data_rwsem; 2633 struct xarray client_data; 2634 struct mutex unregistration_lock; 2635 2636 struct ib_cache cache; 2637 /** 2638 * port_data is indexed by port number 2639 */ 2640 struct ib_port_data *port_data; 2641 2642 int num_comp_vectors; 2643 2644 union { 2645 struct device dev; 2646 struct ib_core_device coredev; 2647 }; 2648 2649 /* First group for device attributes, 2650 * Second group for driver provided attributes (optional). 2651 * It is NULL terminated array. 2652 */ 2653 const struct attribute_group *groups[3]; 2654 2655 u64 uverbs_cmd_mask; 2656 u64 uverbs_ex_cmd_mask; 2657 2658 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2659 __be64 node_guid; 2660 u32 local_dma_lkey; 2661 u16 is_switch:1; 2662 /* Indicates kernel verbs support, should not be used in drivers */ 2663 u16 kverbs_provider:1; 2664 /* CQ adaptive moderation (RDMA DIM) */ 2665 u16 use_cq_dim:1; 2666 u8 node_type; 2667 u8 phys_port_cnt; 2668 struct ib_device_attr attrs; 2669 struct attribute_group *hw_stats_ag; 2670 struct rdma_hw_stats *hw_stats; 2671 2672 #ifdef CONFIG_CGROUP_RDMA 2673 struct rdmacg_device cg_device; 2674 #endif 2675 2676 u32 index; 2677 struct rdma_restrack_root *res; 2678 2679 const struct uapi_definition *driver_def; 2680 2681 /* 2682 * Positive refcount indicates that the device is currently 2683 * registered and cannot be unregistered. 2684 */ 2685 refcount_t refcount; 2686 struct completion unreg_completion; 2687 struct work_struct unregistration_work; 2688 2689 const struct rdma_link_ops *link_ops; 2690 2691 /* Protects compat_devs xarray modifications */ 2692 struct mutex compat_devs_mutex; 2693 /* Maintains compat devices for each net namespace */ 2694 struct xarray compat_devs; 2695 2696 /* Used by iWarp CM */ 2697 char iw_ifname[IFNAMSIZ]; 2698 u32 iw_driver_flags; 2699 }; 2700 2701 struct ib_client_nl_info; 2702 struct ib_client { 2703 const char *name; 2704 void (*add) (struct ib_device *); 2705 void (*remove)(struct ib_device *, void *client_data); 2706 void (*rename)(struct ib_device *dev, void *client_data); 2707 int (*get_nl_info)(struct ib_device *ibdev, void *client_data, 2708 struct ib_client_nl_info *res); 2709 int (*get_global_nl_info)(struct ib_client_nl_info *res); 2710 2711 /* Returns the net_dev belonging to this ib_client and matching the 2712 * given parameters. 2713 * @dev: An RDMA device that the net_dev use for communication. 2714 * @port: A physical port number on the RDMA device. 2715 * @pkey: P_Key that the net_dev uses if applicable. 2716 * @gid: A GID that the net_dev uses to communicate. 2717 * @addr: An IP address the net_dev is configured with. 2718 * @client_data: The device's client data set by ib_set_client_data(). 2719 * 2720 * An ib_client that implements a net_dev on top of RDMA devices 2721 * (such as IP over IB) should implement this callback, allowing the 2722 * rdma_cm module to find the right net_dev for a given request. 
2723 * 2724 * The caller is responsible for calling dev_put on the returned 2725 * netdev. */ 2726 struct net_device *(*get_net_dev_by_params)( 2727 struct ib_device *dev, 2728 u8 port, 2729 u16 pkey, 2730 const union ib_gid *gid, 2731 const struct sockaddr *addr, 2732 void *client_data); 2733 2734 refcount_t uses; 2735 struct completion uses_zero; 2736 u32 client_id; 2737 2738 /* kverbs are not required by the client */ 2739 u8 no_kverbs_req:1; 2740 }; 2741 2742 /* 2743 * IB block DMA iterator 2744 * 2745 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned 2746 * to a HW supported page size. 2747 */ 2748 struct ib_block_iter { 2749 /* internal states */ 2750 struct scatterlist *__sg; /* sg holding the current aligned block */ 2751 dma_addr_t __dma_addr; /* unaligned DMA address of this block */ 2752 unsigned int __sg_nents; /* number of SG entries */ 2753 unsigned int __sg_advance; /* number of bytes to advance in sg in next step */ 2754 unsigned int __pg_bit; /* alignment of current block */ 2755 }; 2756 2757 struct ib_device *_ib_alloc_device(size_t size); 2758 #define ib_alloc_device(drv_struct, member) \ 2759 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \ 2760 BUILD_BUG_ON_ZERO(offsetof( \ 2761 struct drv_struct, member))), \ 2762 struct drv_struct, member) 2763 2764 void ib_dealloc_device(struct ib_device *device); 2765 2766 void ib_get_device_fw_str(struct ib_device *device, char *str); 2767 2768 int ib_register_device(struct ib_device *device, const char *name); 2769 void ib_unregister_device(struct ib_device *device); 2770 void ib_unregister_driver(enum rdma_driver_id driver_id); 2771 void ib_unregister_device_and_put(struct ib_device *device); 2772 void ib_unregister_device_queued(struct ib_device *ib_dev); 2773 2774 int ib_register_client (struct ib_client *client); 2775 void ib_unregister_client(struct ib_client *client); 2776 2777 void __rdma_block_iter_start(struct ib_block_iter *biter, 2778 struct scatterlist *sglist, 2779 unsigned int nents, 2780 unsigned long pgsz); 2781 bool __rdma_block_iter_next(struct ib_block_iter *biter); 2782 2783 /** 2784 * rdma_block_iter_dma_address - get the aligned dma address of the current 2785 * block held by the block iterator. 2786 * @biter: block iterator holding the memory block 2787 */ 2788 static inline dma_addr_t 2789 rdma_block_iter_dma_address(struct ib_block_iter *biter) 2790 { 2791 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1); 2792 } 2793 2794 /** 2795 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list 2796 * @sglist: sglist to iterate over 2797 * @biter: block iterator holding the memory block 2798 * @nents: maximum number of sg entries to iterate over 2799 * @pgsz: best HW supported page size to use 2800 * 2801 * Callers may use rdma_block_iter_dma_address() to get each 2802 * blocks aligned DMA address. 2803 */ 2804 #define rdma_for_each_block(sglist, biter, nents, pgsz) \ 2805 for (__rdma_block_iter_start(biter, sglist, nents, \ 2806 pgsz); \ 2807 __rdma_block_iter_next(biter);) 2808 2809 /** 2810 * ib_get_client_data - Get IB client context 2811 * @device:Device to get context for 2812 * @client:Client to get context for 2813 * 2814 * ib_get_client_data() returns the client context data set with 2815 * ib_set_client_data(). This can only be called while the client is 2816 * registered to the device, once the ib_client remove() callback returns this 2817 * cannot be called. 
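 *
 * Illustrative pattern only (my_client, my_add, my_remove and struct my_data
 * are hypothetical names):
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		ib_set_client_data(device, &my_client, data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 * After ib_register_client(&my_client), other paths can look the pointer up
 * again with ib_get_client_data(device, &my_client) until remove() returns.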
2818 */ 2819 static inline void *ib_get_client_data(struct ib_device *device, 2820 struct ib_client *client) 2821 { 2822 return xa_load(&device->client_data, client->client_id); 2823 } 2824 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2825 void *data); 2826 void ib_set_device_ops(struct ib_device *device, 2827 const struct ib_device_ops *ops); 2828 2829 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, 2830 unsigned long pfn, unsigned long size, pgprot_t prot, 2831 struct rdma_user_mmap_entry *entry); 2832 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, 2833 struct rdma_user_mmap_entry *entry, 2834 size_t length); 2835 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext, 2836 struct rdma_user_mmap_entry *entry, 2837 size_t length, u32 min_pgoff, 2838 u32 max_pgoff); 2839 2840 struct rdma_user_mmap_entry * 2841 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext, 2842 unsigned long pgoff); 2843 struct rdma_user_mmap_entry * 2844 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext, 2845 struct vm_area_struct *vma); 2846 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry); 2847 2848 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry); 2849 2850 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2851 { 2852 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2853 } 2854 2855 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2856 { 2857 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2858 } 2859 2860 static inline bool ib_is_buffer_cleared(const void __user *p, 2861 size_t len) 2862 { 2863 bool ret; 2864 u8 *buf; 2865 2866 if (len > USHRT_MAX) 2867 return false; 2868 2869 buf = memdup_user(p, len); 2870 if (IS_ERR(buf)) 2871 return false; 2872 2873 ret = !memchr_inv(buf, 0, len); 2874 kfree(buf); 2875 return ret; 2876 } 2877 2878 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2879 size_t offset, 2880 size_t len) 2881 { 2882 return ib_is_buffer_cleared(udata->inbuf + offset, len); 2883 } 2884 2885 /** 2886 * ib_is_destroy_retryable - Check whether the uobject destruction 2887 * is retryable. 2888 * @ret: The initial destruction return code 2889 * @why: remove reason 2890 * @uobj: The uobject that is destroyed 2891 * 2892 * This function is a helper function that IB layer and low-level drivers 2893 * can use to consider whether the destruction of the given uobject is 2894 * retry-able. 2895 * It checks the original return code, if it wasn't success the destruction 2896 * is retryable according to the ucontext state (i.e. cleanup_retryable) and 2897 * the remove reason. (i.e. why). 2898 * Must be called with the object locked for destroy. 2899 */ 2900 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why, 2901 struct ib_uobject *uobj) 2902 { 2903 return ret && (why == RDMA_REMOVE_DESTROY || 2904 uobj->context->cleanup_retryable); 2905 } 2906 2907 /** 2908 * ib_destroy_usecnt - Called during destruction to check the usecnt 2909 * @usecnt: The usecnt atomic 2910 * @why: remove reason 2911 * @uobj: The uobject that is destroyed 2912 * 2913 * Non-zero usecnts will block destruction unless destruction was triggered by 2914 * a ucontext cleanup. 
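 *
 * A destroy handler would typically use it roughly as follows (obj, why and
 * uobj stand in for the handler's own object and parameters; this is a
 * sketch, not a real driver):
 *
 *	ret = ib_destroy_usecnt(&obj->usecnt, why, uobj);
 *	if (ret)
 *		return ret;
 *	// safe to tear down the hardware object now
 *
 * so an object that is still in use fails an ordinary destroy with -EBUSY
 * but is still torn down during ucontext cleanup.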
2915 */ 2916 static inline int ib_destroy_usecnt(atomic_t *usecnt, 2917 enum rdma_remove_reason why, 2918 struct ib_uobject *uobj) 2919 { 2920 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj)) 2921 return -EBUSY; 2922 return 0; 2923 } 2924 2925 /** 2926 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2927 * contains all required attributes and no attributes not allowed for 2928 * the given QP state transition. 2929 * @cur_state: Current QP state 2930 * @next_state: Next QP state 2931 * @type: QP type 2932 * @mask: Mask of supplied QP attributes 2933 * 2934 * This function is a helper function that a low-level driver's 2935 * modify_qp method can use to validate the consumer's input. It 2936 * checks that cur_state and next_state are valid QP states, that a 2937 * transition from cur_state to next_state is allowed by the IB spec, 2938 * and that the attribute mask supplied is allowed for the transition. 2939 */ 2940 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2941 enum ib_qp_type type, enum ib_qp_attr_mask mask); 2942 2943 void ib_register_event_handler(struct ib_event_handler *event_handler); 2944 void ib_unregister_event_handler(struct ib_event_handler *event_handler); 2945 void ib_dispatch_event(struct ib_event *event); 2946 2947 int ib_query_port(struct ib_device *device, 2948 u8 port_num, struct ib_port_attr *port_attr); 2949 2950 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2951 u8 port_num); 2952 2953 /** 2954 * rdma_cap_ib_switch - Check if the device is IB switch 2955 * @device: Device to check 2956 * 2957 * Device driver is responsible for setting is_switch bit on 2958 * in ib_device structure at init time. 2959 * 2960 * Return: true if the device is IB switch. 2961 */ 2962 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2963 { 2964 return device->is_switch; 2965 } 2966 2967 /** 2968 * rdma_start_port - Return the first valid port number for the device 2969 * specified 2970 * 2971 * @device: Device to be checked 2972 * 2973 * Return start port number 2974 */ 2975 static inline u8 rdma_start_port(const struct ib_device *device) 2976 { 2977 return rdma_cap_ib_switch(device) ? 0 : 1; 2978 } 2979 2980 /** 2981 * rdma_for_each_port - Iterate over all valid port numbers of the IB device 2982 * @device - The struct ib_device * to iterate over 2983 * @iter - The unsigned int to store the port number 2984 */ 2985 #define rdma_for_each_port(device, iter) \ 2986 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \ 2987 unsigned int, iter))); \ 2988 iter <= rdma_end_port(device); (iter)++) 2989 2990 /** 2991 * rdma_end_port - Return the last valid port number for the device 2992 * specified 2993 * 2994 * @device: Device to be checked 2995 * 2996 * Return last port number 2997 */ 2998 static inline u8 rdma_end_port(const struct ib_device *device) 2999 { 3000 return rdma_cap_ib_switch(device) ? 
0 : device->phys_port_cnt; 3001 } 3002 3003 static inline int rdma_is_port_valid(const struct ib_device *device, 3004 unsigned int port) 3005 { 3006 return (port >= rdma_start_port(device) && 3007 port <= rdma_end_port(device)); 3008 } 3009 3010 static inline bool rdma_is_grh_required(const struct ib_device *device, 3011 u8 port_num) 3012 { 3013 return device->port_data[port_num].immutable.core_cap_flags & 3014 RDMA_CORE_PORT_IB_GRH_REQUIRED; 3015 } 3016 3017 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 3018 { 3019 return device->port_data[port_num].immutable.core_cap_flags & 3020 RDMA_CORE_CAP_PROT_IB; 3021 } 3022 3023 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 3024 { 3025 return device->port_data[port_num].immutable.core_cap_flags & 3026 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 3027 } 3028 3029 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 3030 { 3031 return device->port_data[port_num].immutable.core_cap_flags & 3032 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 3033 } 3034 3035 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 3036 { 3037 return device->port_data[port_num].immutable.core_cap_flags & 3038 RDMA_CORE_CAP_PROT_ROCE; 3039 } 3040 3041 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 3042 { 3043 return device->port_data[port_num].immutable.core_cap_flags & 3044 RDMA_CORE_CAP_PROT_IWARP; 3045 } 3046 3047 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 3048 { 3049 return rdma_protocol_ib(device, port_num) || 3050 rdma_protocol_roce(device, port_num); 3051 } 3052 3053 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) 3054 { 3055 return device->port_data[port_num].immutable.core_cap_flags & 3056 RDMA_CORE_CAP_PROT_RAW_PACKET; 3057 } 3058 3059 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) 3060 { 3061 return device->port_data[port_num].immutable.core_cap_flags & 3062 RDMA_CORE_CAP_PROT_USNIC; 3063 } 3064 3065 /** 3066 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 3067 * Management Datagrams. 3068 * @device: Device to check 3069 * @port_num: Port number to check 3070 * 3071 * Management Datagrams (MAD) are a required part of the InfiniBand 3072 * specification and are supported on all InfiniBand devices. A slightly 3073 * extended version are also supported on OPA interfaces. 3074 * 3075 * Return: true if the port supports sending/receiving of MAD packets. 3076 */ 3077 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 3078 { 3079 return device->port_data[port_num].immutable.core_cap_flags & 3080 RDMA_CORE_CAP_IB_MAD; 3081 } 3082 3083 /** 3084 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 3085 * Management Datagrams. 3086 * @device: Device to check 3087 * @port_num: Port number to check 3088 * 3089 * Intel OmniPath devices extend and/or replace the InfiniBand Management 3090 * datagrams with their own versions. These OPA MADs share many but not all of 3091 * the characteristics of InfiniBand MADs. 
3092 * 3093 * OPA MADs differ in the following ways: 3094 * 3095 * 1) MADs are variable size up to 2K 3096 * IBTA defined MADs remain fixed at 256 bytes 3097 * 2) OPA SMPs must carry valid PKeys 3098 * 3) OPA SMP packets use a different format 3099 * 3100 * Return: true if the port supports OPA MAD packet formats. 3101 */ 3102 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 3103 { 3104 return device->port_data[port_num].immutable.core_cap_flags & 3105 RDMA_CORE_CAP_OPA_MAD; 3106 } 3107 3108 /** 3109 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 3110 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 3111 * @device: Device to check 3112 * @port_num: Port number to check 3113 * 3114 * Each InfiniBand node is required to provide a Subnet Management Agent 3115 * that the subnet manager can access. Prior to the fabric being fully 3116 * configured by the subnet manager, the SMA is accessed via a well known 3117 * interface called the Subnet Management Interface (SMI). This interface 3118 * uses directed route packets to communicate with the SM to get around the 3119 * chicken and egg problem of the SM needing to know what's on the fabric 3120 * in order to configure the fabric, and needing to configure the fabric in 3121 * order to send packets to the devices on the fabric. These directed 3122 * route packets do not need the fabric fully configured in order to reach 3123 * their destination. The SMI is the only method allowed to send 3124 * directed route packets on an InfiniBand fabric. 3125 * 3126 * Return: true if the port provides an SMI. 3127 */ 3128 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 3129 { 3130 return device->port_data[port_num].immutable.core_cap_flags & 3131 RDMA_CORE_CAP_IB_SMI; 3132 } 3133 3134 /** 3135 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 3136 * Communication Manager. 3137 * @device: Device to check 3138 * @port_num: Port number to check 3139 * 3140 * The InfiniBand Communication Manager is one of many pre-defined General 3141 * Service Agents (GSA) that are accessed via the General Service 3142 * Interface (GSI). Its role is to facilitate establishment of connections 3143 * between nodes as well as other management related tasks for established 3144 * connections. 3145 * 3146 * Return: true if the port supports an IB CM (this does not guarantee that 3147 * a CM is actually running however). 3148 */ 3149 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 3150 { 3151 return device->port_data[port_num].immutable.core_cap_flags & 3152 RDMA_CORE_CAP_IB_CM; 3153 } 3154 3155 /** 3156 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 3157 * Communication Manager. 3158 * @device: Device to check 3159 * @port_num: Port number to check 3160 * 3161 * Similar to above, but specific to iWARP connections which have a different 3162 * management protocol than InfiniBand. 3163 * 3164 * Return: true if the port supports an iWARP CM (this does not guarantee that 3165 * a CM is actually running however). 3166 */ 3167 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 3168 { 3169 return device->port_data[port_num].immutable.core_cap_flags & 3170 RDMA_CORE_CAP_IW_CM; 3171 } 3172 3173 /** 3174 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 3175 * Subnet Administration.
3176 * @device: Device to check 3177 * @port_num: Port number to check 3178 * 3179 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 3180 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 3181 * fabrics, devices should resolve routes to other hosts by contacting the 3182 * SA to query the proper route. 3183 * 3184 * Return: true if the port should act as a client to the fabric Subnet 3185 * Administration interface. This does not imply that the SA service is 3186 * running locally. 3187 */ 3188 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 3189 { 3190 return device->port_data[port_num].immutable.core_cap_flags & 3191 RDMA_CORE_CAP_IB_SA; 3192 } 3193 3194 /** 3195 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 3196 * Multicast. 3197 * @device: Device to check 3198 * @port_num: Port number to check 3199 * 3200 * InfiniBand multicast registration is more complex than normal IPv4 or 3201 * IPv6 multicast registration. Each Host Channel Adapter must register 3202 * with the Subnet Manager when it wishes to join a multicast group. It 3203 * should do so only once regardless of how many queue pairs it subscribes 3204 * to this group. And it should leave the group only after all queue pairs 3205 * attached to the group have been detached. 3206 * 3207 * Return: true if the port must undertake the additional adminstrative 3208 * overhead of registering/unregistering with the SM and tracking of the 3209 * total number of queue pairs attached to the multicast group. 3210 */ 3211 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 3212 { 3213 return rdma_cap_ib_sa(device, port_num); 3214 } 3215 3216 /** 3217 * rdma_cap_af_ib - Check if the port of device has the capability 3218 * Native Infiniband Address. 3219 * @device: Device to check 3220 * @port_num: Port number to check 3221 * 3222 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 3223 * GID. RoCE uses a different mechanism, but still generates a GID via 3224 * a prescribed mechanism and port specific data. 3225 * 3226 * Return: true if the port uses a GID address to identify devices on the 3227 * network. 3228 */ 3229 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 3230 { 3231 return device->port_data[port_num].immutable.core_cap_flags & 3232 RDMA_CORE_CAP_AF_IB; 3233 } 3234 3235 /** 3236 * rdma_cap_eth_ah - Check if the port of device has the capability 3237 * Ethernet Address Handle. 3238 * @device: Device to check 3239 * @port_num: Port number to check 3240 * 3241 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 3242 * to fabricate GIDs over Ethernet/IP specific addresses native to the 3243 * port. Normally, packet headers are generated by the sending host 3244 * adapter, but when sending connectionless datagrams, we must manually 3245 * inject the proper headers for the fabric we are communicating over. 3246 * 3247 * Return: true if we are running as a RoCE port and must force the 3248 * addition of a Global Route Header built from our Ethernet Address 3249 * Handle into our header list for connectionless packets. 
3250 */ 3251 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 3252 { 3253 return device->port_data[port_num].immutable.core_cap_flags & 3254 RDMA_CORE_CAP_ETH_AH; 3255 } 3256 3257 /** 3258 * rdma_cap_opa_ah - Check if the port of device supports 3259 * OPA Address handles 3260 * @device: Device to check 3261 * @port_num: Port number to check 3262 * 3263 * Return: true if we are running on an OPA device which supports 3264 * the extended OPA addressing. 3265 */ 3266 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num) 3267 { 3268 return (device->port_data[port_num].immutable.core_cap_flags & 3269 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; 3270 } 3271 3272 /** 3273 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 3274 * 3275 * @device: Device 3276 * @port_num: Port number 3277 * 3278 * This MAD size includes the MAD headers and MAD payload. No other headers 3279 * are included. 3280 * 3281 * Return the max MAD size required by the Port. Will return 0 if the port 3282 * does not support MADs 3283 */ 3284 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 3285 { 3286 return device->port_data[port_num].immutable.max_mad_size; 3287 } 3288 3289 /** 3290 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 3291 * @device: Device to check 3292 * @port_num: Port number to check 3293 * 3294 * RoCE GID table mechanism manages the various GIDs for a device. 3295 * 3296 * NOTE: if allocating the port's GID table has failed, this call will still 3297 * return true, but any RoCE GID table API will fail. 3298 * 3299 * Return: true if the port uses RoCE GID table mechanism in order to manage 3300 * its GIDs. 3301 */ 3302 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 3303 u8 port_num) 3304 { 3305 return rdma_protocol_roce(device, port_num) && 3306 device->ops.add_gid && device->ops.del_gid; 3307 } 3308 3309 /* 3310 * Check if the device supports READ W/ INVALIDATE. 3311 */ 3312 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 3313 { 3314 /* 3315 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 3316 * has support for it yet. 
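	 *
	 * Sketch of how a ULP might consume this (use_inv is a hypothetical
	 * flag; port must be an unsigned int for rdma_for_each_port()):
	 *
	 *	bool use_inv = true;
	 *	unsigned int port;
	 *
	 *	rdma_for_each_port(dev, port)
	 *		if (!rdma_cap_read_inv(dev, port))
	 *			use_inv = false;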
3317 */ 3318 return rdma_protocol_iwarp(dev, port_num); 3319 } 3320 3321 /** 3322 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes 3323 * 3324 * @addr: address 3325 * @pgsz_bitmap: bitmap of HW supported page sizes 3326 */ 3327 static inline unsigned int rdma_find_pg_bit(unsigned long addr, 3328 unsigned long pgsz_bitmap) 3329 { 3330 unsigned long align; 3331 unsigned long pgsz; 3332 3333 align = addr & -addr; 3334 3335 /* Find page bit such that addr is aligned to the highest supported 3336 * HW page size 3337 */ 3338 pgsz = pgsz_bitmap & ~(-align << 1); 3339 if (!pgsz) 3340 return __ffs(pgsz_bitmap); 3341 3342 return __fls(pgsz); 3343 } 3344 3345 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 3346 int state); 3347 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 3348 struct ifla_vf_info *info); 3349 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 3350 struct ifla_vf_stats *stats); 3351 int ib_get_vf_guid(struct ib_device *device, int vf, u8 port, 3352 struct ifla_vf_guid *node_guid, 3353 struct ifla_vf_guid *port_guid); 3354 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 3355 int type); 3356 3357 int ib_query_pkey(struct ib_device *device, 3358 u8 port_num, u16 index, u16 *pkey); 3359 3360 int ib_modify_device(struct ib_device *device, 3361 int device_modify_mask, 3362 struct ib_device_modify *device_modify); 3363 3364 int ib_modify_port(struct ib_device *device, 3365 u8 port_num, int port_modify_mask, 3366 struct ib_port_modify *port_modify); 3367 3368 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 3369 u8 *port_num, u16 *index); 3370 3371 int ib_find_pkey(struct ib_device *device, 3372 u8 port_num, u16 pkey, u16 *index); 3373 3374 enum ib_pd_flags { 3375 /* 3376 * Create a memory registration for all memory in the system and place 3377 * the rkey for it into pd->unsafe_global_rkey. This can be used by 3378 * ULPs to avoid the overhead of dynamic MRs. 3379 * 3380 * This flag is generally considered unsafe and must only be used in 3381 * extremly trusted environments. Every use of it will log a warning 3382 * in the kernel log. 3383 */ 3384 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 3385 }; 3386 3387 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 3388 const char *caller); 3389 3390 #define ib_alloc_pd(device, flags) \ 3391 __ib_alloc_pd((device), (flags), KBUILD_MODNAME) 3392 3393 /** 3394 * ib_dealloc_pd_user - Deallocate kernel/user PD 3395 * @pd: The protection domain 3396 * @udata: Valid user data or NULL for kernel objects 3397 */ 3398 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); 3399 3400 /** 3401 * ib_dealloc_pd - Deallocate kernel PD 3402 * @pd: The protection domain 3403 * 3404 * NOTE: for user PD use ib_dealloc_pd_user with valid udata! 3405 */ 3406 static inline void ib_dealloc_pd(struct ib_pd *pd) 3407 { 3408 ib_dealloc_pd_user(pd, NULL); 3409 } 3410 3411 enum rdma_create_ah_flags { 3412 /* In a sleepable context */ 3413 RDMA_CREATE_AH_SLEEPABLE = BIT(0), 3414 }; 3415 3416 /** 3417 * rdma_create_ah - Creates an address handle for the given address vector. 3418 * @pd: The protection domain associated with the address handle. 3419 * @ah_attr: The attributes of the address vector. 3420 * @flags: Create address handle flags (see enum rdma_create_ah_flags). 3421 * 3422 * The address handle is used to reference a local or global destination 3423 * in all UD QP post sends. 
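 *
 * Minimal kernel-side sketch (error handling shortened; ah_attr is assumed
 * to have been initialized already, e.g. by ib_init_ah_attr_from_wc()):
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *	struct ib_ah *ah;
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah)) {
 *		ib_dealloc_pd(pd);
 *		return PTR_ERR(ah);
 *	}
 *	// ... use ah in UD work requests, then:
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 *	ib_dealloc_pd(pd);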
3424 */ 3425 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, 3426 u32 flags); 3427 3428 /** 3429 * rdma_create_user_ah - Creates an address handle for the given address vector. 3430 * It resolves destination mac address for ah attribute of RoCE type. 3431 * @pd: The protection domain associated with the address handle. 3432 * @ah_attr: The attributes of the address vector. 3433 * @udata: pointer to user's input output buffer information need by 3434 * provider driver. 3435 * 3436 * It returns 0 on success and returns appropriate error code on error. 3437 * The address handle is used to reference a local or global destination 3438 * in all UD QP post sends. 3439 */ 3440 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, 3441 struct rdma_ah_attr *ah_attr, 3442 struct ib_udata *udata); 3443 /** 3444 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header 3445 * work completion. 3446 * @hdr: the L3 header to parse 3447 * @net_type: type of header to parse 3448 * @sgid: place to store source gid 3449 * @dgid: place to store destination gid 3450 */ 3451 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 3452 enum rdma_network_type net_type, 3453 union ib_gid *sgid, union ib_gid *dgid); 3454 3455 /** 3456 * ib_get_rdma_header_version - Get the header version 3457 * @hdr: the L3 header to parse 3458 */ 3459 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); 3460 3461 /** 3462 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a 3463 * work completion. 3464 * @device: Device on which the received message arrived. 3465 * @port_num: Port on which the received message arrived. 3466 * @wc: Work completion associated with the received message. 3467 * @grh: References the received global route header. This parameter is 3468 * ignored unless the work completion indicates that the GRH is valid. 3469 * @ah_attr: Returned attributes that can be used when creating an address 3470 * handle for replying to the message. 3471 * When ib_init_ah_attr_from_wc() returns success, 3472 * (a) for IB link layer it optionally contains a reference to SGID attribute 3473 * when GRH is present for IB link layer. 3474 * (b) for RoCE link layer it contains a reference to SGID attribute. 3475 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID 3476 * attributes which are initialized using ib_init_ah_attr_from_wc(). 3477 * 3478 */ 3479 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, 3480 const struct ib_wc *wc, const struct ib_grh *grh, 3481 struct rdma_ah_attr *ah_attr); 3482 3483 /** 3484 * ib_create_ah_from_wc - Creates an address handle associated with the 3485 * sender of the specified work completion. 3486 * @pd: The protection domain associated with the address handle. 3487 * @wc: Work completion information associated with a received message. 3488 * @grh: References the received global route header. This parameter is 3489 * ignored unless the work completion indicates that the GRH is valid. 3490 * @port_num: The outbound port number to associate with the address. 3491 * 3492 * The address handle is used to reference a local or global destination 3493 * in all UD QP post sends. 3494 */ 3495 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 3496 const struct ib_grh *grh, u8 port_num); 3497 3498 /** 3499 * rdma_modify_ah - Modifies the address vector associated with an address 3500 * handle. 3501 * @ah: The address handle to modify. 
3502 * @ah_attr: The new address vector attributes to associate with the 3503 * address handle. 3504 */ 3505 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 3506 3507 /** 3508 * rdma_query_ah - Queries the address vector associated with an address 3509 * handle. 3510 * @ah: The address handle to query. 3511 * @ah_attr: The address vector attributes associated with the address 3512 * handle. 3513 */ 3514 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 3515 3516 enum rdma_destroy_ah_flags { 3517 /* In a sleepable context */ 3518 RDMA_DESTROY_AH_SLEEPABLE = BIT(0), 3519 }; 3520 3521 /** 3522 * rdma_destroy_ah_user - Destroys an address handle. 3523 * @ah: The address handle to destroy. 3524 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). 3525 * @udata: Valid user data or NULL for kernel objects 3526 */ 3527 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata); 3528 3529 /** 3530 * rdma_destroy_ah - Destroys a kernel address handle. 3531 * @ah: The address handle to destroy. 3532 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). 3533 * 3534 * NOTE: for user ah use rdma_destroy_ah_user with valid udata! 3535 */ 3536 static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags) 3537 { 3538 return rdma_destroy_ah_user(ah, flags, NULL); 3539 } 3540 3541 /** 3542 * ib_create_srq - Creates an SRQ associated with the specified protection 3543 * domain. 3544 * @pd: The protection domain associated with the SRQ. 3545 * @srq_init_attr: A list of initial attributes required to create the 3546 * SRQ. If SRQ creation succeeds, then the attributes are updated to 3547 * the actual capabilities of the created SRQ. 3548 * 3549 * srq_attr->max_wr and srq_attr->max_sge are read to determine the 3550 * requested size of the SRQ, and set to the actual values allocated 3551 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 3552 * will always be at least as large as the requested values. 3553 */ 3554 struct ib_srq *ib_create_srq(struct ib_pd *pd, 3555 struct ib_srq_init_attr *srq_init_attr); 3556 3557 /** 3558 * ib_modify_srq - Modifies the attributes for the specified SRQ. 3559 * @srq: The SRQ to modify. 3560 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 3561 * the current values of selected SRQ attributes are returned. 3562 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 3563 * are being modified. 3564 * 3565 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 3566 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 3567 * the number of receives queued drops below the limit. 3568 */ 3569 int ib_modify_srq(struct ib_srq *srq, 3570 struct ib_srq_attr *srq_attr, 3571 enum ib_srq_attr_mask srq_attr_mask); 3572 3573 /** 3574 * ib_query_srq - Returns the attribute list and current values for the 3575 * specified SRQ. 3576 * @srq: The SRQ to query. 3577 * @srq_attr: The attributes of the specified SRQ. 3578 */ 3579 int ib_query_srq(struct ib_srq *srq, 3580 struct ib_srq_attr *srq_attr); 3581 3582 /** 3583 * ib_destroy_srq_user - Destroys the specified SRQ. 3584 * @srq: The SRQ to destroy. 3585 * @udata: Valid user data or NULL for kernel objects 3586 */ 3587 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata); 3588 3589 /** 3590 * ib_destroy_srq - Destroys the specified kernel SRQ. 3591 * @srq: The SRQ to destroy. 3592 * 3593 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
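 *
 * Kernel consumer sketch (the attribute values are illustrative only):
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr = { .max_wr = 128, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	// post buffers with ib_post_srq_recv(), attach QPs, and finally:
 *	ib_destroy_srq(srq);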
 */
static inline int ib_destroy_srq(struct ib_srq *srq)
{
	return ib_destroy_srq_user(srq, NULL);
}

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   const struct ib_recv_wr *recv_wr,
				   const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return srq->device->ops.post_srq_recv(srq, recv_wr,
					      bad_recv_wr ? : &dummy);
}

/**
 * ib_create_qp_user - Creates a QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 * QP. If QP creation succeeds, then the attributes are updated to
 * the actual capabilities of the created QP.
 * @udata: Valid user data or NULL for kernel objects
 */
struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata);

/**
 * ib_create_qp - Creates a kernel QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 * QP. If QP creation succeeds, then the attributes are updated to
 * the actual capabilities of the created QP.
 *
 * NOTE: for user qp use ib_create_qp_user with valid udata!
 */
static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
					 struct ib_qp_init_attr *qp_init_attr)
{
	return ib_create_qp_user(pd, qp_init_attr, NULL);
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify. On output,
 * the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 * are being modified.
 * @udata: pointer to the user's input/output buffer information
 *
 * Returns 0 on success or the appropriate error code on failure.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 * the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 * are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp_user - Destroys the specified QP.
 * @qp: The QP to destroy.
 * @udata: Valid udata or NULL for kernel objects
 */
int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);

/**
 * ib_destroy_qp - Destroys the specified kernel QP.
 * @qp: The QP to destroy.
 *
 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
 */
static inline int ib_destroy_qp(struct ib_qp *qp)
{
	return ib_destroy_qp_user(qp, NULL);
}

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller. The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       const struct ib_recv_wr *recv_wr,
			       const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ?
: &dummy); 3766 } 3767 3768 struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, 3769 int nr_cqe, int comp_vector, 3770 enum ib_poll_context poll_ctx, 3771 const char *caller, struct ib_udata *udata); 3772 3773 /** 3774 * ib_alloc_cq_user: Allocate kernel/user CQ 3775 * @dev: The IB device 3776 * @private: Private data attached to the CQE 3777 * @nr_cqe: Number of CQEs in the CQ 3778 * @comp_vector: Completion vector used for the IRQs 3779 * @poll_ctx: Context used for polling the CQ 3780 * @udata: Valid user data or NULL for kernel objects 3781 */ 3782 static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev, 3783 void *private, int nr_cqe, 3784 int comp_vector, 3785 enum ib_poll_context poll_ctx, 3786 struct ib_udata *udata) 3787 { 3788 return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, 3789 KBUILD_MODNAME, udata); 3790 } 3791 3792 /** 3793 * ib_alloc_cq: Allocate kernel CQ 3794 * @dev: The IB device 3795 * @private: Private data attached to the CQE 3796 * @nr_cqe: Number of CQEs in the CQ 3797 * @comp_vector: Completion vector used for the IRQs 3798 * @poll_ctx: Context used for polling the CQ 3799 * 3800 * NOTE: for user cq use ib_alloc_cq_user with valid udata! 3801 */ 3802 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 3803 int nr_cqe, int comp_vector, 3804 enum ib_poll_context poll_ctx) 3805 { 3806 return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, 3807 NULL); 3808 } 3809 3810 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, 3811 int nr_cqe, enum ib_poll_context poll_ctx, 3812 const char *caller); 3813 3814 /** 3815 * ib_alloc_cq_any: Allocate kernel CQ 3816 * @dev: The IB device 3817 * @private: Private data attached to the CQE 3818 * @nr_cqe: Number of CQEs in the CQ 3819 * @poll_ctx: Context used for polling the CQ 3820 */ 3821 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, 3822 void *private, int nr_cqe, 3823 enum ib_poll_context poll_ctx) 3824 { 3825 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx, 3826 KBUILD_MODNAME); 3827 } 3828 3829 /** 3830 * ib_free_cq_user - Free kernel/user CQ 3831 * @cq: The CQ to free 3832 * @udata: Valid user data or NULL for kernel objects 3833 */ 3834 void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata); 3835 3836 /** 3837 * ib_free_cq - Free kernel CQ 3838 * @cq: The CQ to free 3839 * 3840 * NOTE: for user cq use ib_free_cq_user with valid udata! 3841 */ 3842 static inline void ib_free_cq(struct ib_cq *cq) 3843 { 3844 ib_free_cq_user(cq, NULL); 3845 } 3846 3847 int ib_process_cq_direct(struct ib_cq *cq, int budget); 3848 3849 /** 3850 * ib_create_cq - Creates a CQ on the specified device. 3851 * @device: The device on which to create the CQ. 3852 * @comp_handler: A user-specified callback that is invoked when a 3853 * completion event occurs on the CQ. 3854 * @event_handler: A user-specified callback that is invoked when an 3855 * asynchronous event not associated with a completion occurs on the CQ. 3856 * @cq_context: Context associated with the CQ returned to the user via 3857 * the associated completion and event handlers. 3858 * @cq_attr: The attributes the CQ should be created upon. 3859 * 3860 * Users can examine the cq structure to determine the actual CQ size. 
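 *
 * A minimal polling-mode sketch (illustrative only; "device" is assumed to
 * be a valid struct ib_device and completions are reaped with ib_poll_cq()):
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256 };
 *	struct ib_cq *cq = ib_create_cq(device, NULL, NULL, NULL, &cq_attr);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_destroy_cq(cq);
 *
 * Most kernel consumers are generally better served by the ib_alloc_cq() /
 * ib_free_cq() helpers declared above, which also manage completion
 * processing.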
3861 */ 3862 struct ib_cq *__ib_create_cq(struct ib_device *device, 3863 ib_comp_handler comp_handler, 3864 void (*event_handler)(struct ib_event *, void *), 3865 void *cq_context, 3866 const struct ib_cq_init_attr *cq_attr, 3867 const char *caller); 3868 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \ 3869 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME) 3870 3871 /** 3872 * ib_resize_cq - Modifies the capacity of the CQ. 3873 * @cq: The CQ to resize. 3874 * @cqe: The minimum size of the CQ. 3875 * 3876 * Users can examine the cq structure to determine the actual CQ size. 3877 */ 3878 int ib_resize_cq(struct ib_cq *cq, int cqe); 3879 3880 /** 3881 * rdma_set_cq_moderation - Modifies moderation params of the CQ 3882 * @cq: The CQ to modify. 3883 * @cq_count: number of CQEs that will trigger an event 3884 * @cq_period: max period of time in usec before triggering an event 3885 * 3886 */ 3887 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period); 3888 3889 /** 3890 * ib_destroy_cq_user - Destroys the specified CQ. 3891 * @cq: The CQ to destroy. 3892 * @udata: Valid user data or NULL for kernel objects 3893 */ 3894 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); 3895 3896 /** 3897 * ib_destroy_cq - Destroys the specified kernel CQ. 3898 * @cq: The CQ to destroy. 3899 * 3900 * NOTE: for user cq use ib_destroy_cq_user with valid udata! 3901 */ 3902 static inline void ib_destroy_cq(struct ib_cq *cq) 3903 { 3904 ib_destroy_cq_user(cq, NULL); 3905 } 3906 3907 /** 3908 * ib_poll_cq - poll a CQ for completion(s) 3909 * @cq:the CQ being polled 3910 * @num_entries:maximum number of completions to return 3911 * @wc:array of at least @num_entries &struct ib_wc where completions 3912 * will be returned 3913 * 3914 * Poll a CQ for (possibly multiple) completions. If the return value 3915 * is < 0, an error occurred. If the return value is >= 0, it is the 3916 * number of completions returned. If the return value is 3917 * non-negative and < num_entries, then the CQ was emptied. 3918 */ 3919 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 3920 struct ib_wc *wc) 3921 { 3922 return cq->device->ops.poll_cq(cq, num_entries, wc); 3923 } 3924 3925 /** 3926 * ib_req_notify_cq - Request completion notification on a CQ. 3927 * @cq: The CQ to generate an event for. 3928 * @flags: 3929 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 3930 * to request an event on the next solicited event or next work 3931 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 3932 * may also be |ed in to request a hint about missed events, as 3933 * described below. 3934 * 3935 * Return Value: 3936 * < 0 means an error occurred while requesting notification 3937 * == 0 means notification was requested successfully, and if 3938 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 3939 * were missed and it is safe to wait for another event. In 3940 * this case is it guaranteed that any work completions added 3941 * to the CQ since the last CQ poll will trigger a completion 3942 * notification event. 3943 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 3944 * in. It means that the consumer must poll the CQ again to 3945 * make sure it is empty to avoid missing an event because of a 3946 * race between requesting notification and an entry being 3947 * added to the CQ. 
This return value means it is possible 3948 * (but not guaranteed) that a work completion has been added 3949 * to the CQ since the last poll without triggering a 3950 * completion notification event. 3951 */ 3952 static inline int ib_req_notify_cq(struct ib_cq *cq, 3953 enum ib_cq_notify_flags flags) 3954 { 3955 return cq->device->ops.req_notify_cq(cq, flags); 3956 } 3957 3958 /** 3959 * ib_req_ncomp_notif - Request completion notification when there are 3960 * at least the specified number of unreaped completions on the CQ. 3961 * @cq: The CQ to generate an event for. 3962 * @wc_cnt: The number of unreaped completions that should be on the 3963 * CQ before an event is generated. 3964 */ 3965 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3966 { 3967 return cq->device->ops.req_ncomp_notif ? 3968 cq->device->ops.req_ncomp_notif(cq, wc_cnt) : 3969 -ENOSYS; 3970 } 3971 3972 /** 3973 * ib_dma_mapping_error - check a DMA addr for error 3974 * @dev: The device for which the dma_addr was created 3975 * @dma_addr: The DMA address to check 3976 */ 3977 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3978 { 3979 return dma_mapping_error(dev->dma_device, dma_addr); 3980 } 3981 3982 /** 3983 * ib_dma_map_single - Map a kernel virtual address to DMA address 3984 * @dev: The device for which the dma_addr is to be created 3985 * @cpu_addr: The kernel virtual address 3986 * @size: The size of the region in bytes 3987 * @direction: The direction of the DMA 3988 */ 3989 static inline u64 ib_dma_map_single(struct ib_device *dev, 3990 void *cpu_addr, size_t size, 3991 enum dma_data_direction direction) 3992 { 3993 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3994 } 3995 3996 /** 3997 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 3998 * @dev: The device for which the DMA address was created 3999 * @addr: The DMA address 4000 * @size: The size of the region in bytes 4001 * @direction: The direction of the DMA 4002 */ 4003 static inline void ib_dma_unmap_single(struct ib_device *dev, 4004 u64 addr, size_t size, 4005 enum dma_data_direction direction) 4006 { 4007 dma_unmap_single(dev->dma_device, addr, size, direction); 4008 } 4009 4010 /** 4011 * ib_dma_map_page - Map a physical page to DMA address 4012 * @dev: The device for which the dma_addr is to be created 4013 * @page: The page to be mapped 4014 * @offset: The offset within the page 4015 * @size: The size of the region in bytes 4016 * @direction: The direction of the DMA 4017 */ 4018 static inline u64 ib_dma_map_page(struct ib_device *dev, 4019 struct page *page, 4020 unsigned long offset, 4021 size_t size, 4022 enum dma_data_direction direction) 4023 { 4024 return dma_map_page(dev->dma_device, page, offset, size, direction); 4025 } 4026 4027 /** 4028 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 4029 * @dev: The device for which the DMA address was created 4030 * @addr: The DMA address 4031 * @size: The size of the region in bytes 4032 * @direction: The direction of the DMA 4033 */ 4034 static inline void ib_dma_unmap_page(struct ib_device *dev, 4035 u64 addr, size_t size, 4036 enum dma_data_direction direction) 4037 { 4038 dma_unmap_page(dev->dma_device, addr, size, direction); 4039 } 4040 4041 /** 4042 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 4043 * @dev: The device for which the DMA addresses are to be created 4044 * @sg: The array of scatter/gather entries 4045 * @nents: The number of scatter/gather entries 4046 
* @direction: The direction of the DMA 4047 */ 4048 static inline int ib_dma_map_sg(struct ib_device *dev, 4049 struct scatterlist *sg, int nents, 4050 enum dma_data_direction direction) 4051 { 4052 return dma_map_sg(dev->dma_device, sg, nents, direction); 4053 } 4054 4055 /** 4056 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 4057 * @dev: The device for which the DMA addresses were created 4058 * @sg: The array of scatter/gather entries 4059 * @nents: The number of scatter/gather entries 4060 * @direction: The direction of the DMA 4061 */ 4062 static inline void ib_dma_unmap_sg(struct ib_device *dev, 4063 struct scatterlist *sg, int nents, 4064 enum dma_data_direction direction) 4065 { 4066 dma_unmap_sg(dev->dma_device, sg, nents, direction); 4067 } 4068 4069 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 4070 struct scatterlist *sg, int nents, 4071 enum dma_data_direction direction, 4072 unsigned long dma_attrs) 4073 { 4074 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 4075 dma_attrs); 4076 } 4077 4078 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 4079 struct scatterlist *sg, int nents, 4080 enum dma_data_direction direction, 4081 unsigned long dma_attrs) 4082 { 4083 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); 4084 } 4085 4086 /** 4087 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer 4088 * @dev: The device to query 4089 * 4090 * The returned value represents a size in bytes. 4091 */ 4092 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev) 4093 { 4094 return dma_get_max_seg_size(dev->dma_device); 4095 } 4096 4097 /** 4098 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 4099 * @dev: The device for which the DMA address was created 4100 * @addr: The DMA address 4101 * @size: The size of the region in bytes 4102 * @dir: The direction of the DMA 4103 */ 4104 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 4105 u64 addr, 4106 size_t size, 4107 enum dma_data_direction dir) 4108 { 4109 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 4110 } 4111 4112 /** 4113 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 4114 * @dev: The device for which the DMA address was created 4115 * @addr: The DMA address 4116 * @size: The size of the region in bytes 4117 * @dir: The direction of the DMA 4118 */ 4119 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 4120 u64 addr, 4121 size_t size, 4122 enum dma_data_direction dir) 4123 { 4124 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 4125 } 4126 4127 /** 4128 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 4129 * @dev: The device for which the DMA address is requested 4130 * @size: The size of the region to allocate in bytes 4131 * @dma_handle: A pointer for returning the DMA address of the region 4132 * @flag: memory allocator flags 4133 */ 4134 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 4135 size_t size, 4136 dma_addr_t *dma_handle, 4137 gfp_t flag) 4138 { 4139 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); 4140 } 4141 4142 /** 4143 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 4144 * @dev: The device for which the DMA addresses were allocated 4145 * @size: The size of the region 4146 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 4147 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 4148 */ 4149 
static inline void ib_dma_free_coherent(struct ib_device *dev, 4150 size_t size, void *cpu_addr, 4151 dma_addr_t dma_handle) 4152 { 4153 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 4154 } 4155 4156 /** 4157 * ib_dereg_mr_user - Deregisters a memory region and removes it from the 4158 * HCA translation table. 4159 * @mr: The memory region to deregister. 4160 * @udata: Valid user data or NULL for kernel object 4161 * 4162 * This function can fail, if the memory region has memory windows bound to it. 4163 */ 4164 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata); 4165 4166 /** 4167 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the 4168 * HCA translation table. 4169 * @mr: The memory region to deregister. 4170 * 4171 * This function can fail, if the memory region has memory windows bound to it. 4172 * 4173 * NOTE: for user mr use ib_dereg_mr_user with valid udata! 4174 */ 4175 static inline int ib_dereg_mr(struct ib_mr *mr) 4176 { 4177 return ib_dereg_mr_user(mr, NULL); 4178 } 4179 4180 struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type, 4181 u32 max_num_sg, struct ib_udata *udata); 4182 4183 static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 4184 enum ib_mr_type mr_type, u32 max_num_sg) 4185 { 4186 return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL); 4187 } 4188 4189 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, 4190 u32 max_num_data_sg, 4191 u32 max_num_meta_sg); 4192 4193 /** 4194 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 4195 * R_Key and L_Key. 4196 * @mr - struct ib_mr pointer to be updated. 4197 * @newkey - new key to be used. 4198 */ 4199 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 4200 { 4201 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 4202 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 4203 } 4204 4205 /** 4206 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 4207 * for calculating a new rkey for type 2 memory windows. 4208 * @rkey - the rkey to increment. 4209 */ 4210 static inline u32 ib_inc_rkey(u32 rkey) 4211 { 4212 const u32 mask = 0x000000ff; 4213 return ((rkey + 1) & mask) | (rkey & ~mask); 4214 } 4215 4216 /** 4217 * ib_alloc_fmr - Allocates a unmapped fast memory region. 4218 * @pd: The protection domain associated with the unmapped region. 4219 * @mr_access_flags: Specifies the memory access rights. 4220 * @fmr_attr: Attributes of the unmapped region. 4221 * 4222 * A fast memory region must be mapped before it can be used as part of 4223 * a work request. 4224 */ 4225 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 4226 int mr_access_flags, 4227 struct ib_fmr_attr *fmr_attr); 4228 4229 /** 4230 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 4231 * @fmr: The fast memory region to associate with the pages. 4232 * @page_list: An array of physical pages to map to the fast memory region. 4233 * @list_len: The number of pages in page_list. 4234 * @iova: The I/O virtual address to use with the mapped region. 4235 */ 4236 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 4237 u64 *page_list, int list_len, 4238 u64 iova) 4239 { 4240 return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova); 4241 } 4242 4243 /** 4244 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 4245 * @fmr_list: A linked list of fast memory regions to unmap. 
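 *
 * A hedged sketch (illustrative only; "fmr" is assumed to be a previously
 * mapped struct ib_fmr, linked through its embedded list node):
 *
 *	LIST_HEAD(fmr_list);
 *	int ret;
 *
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ret = ib_unmap_fmr(&fmr_list);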
4246 */ 4247 int ib_unmap_fmr(struct list_head *fmr_list); 4248 4249 /** 4250 * ib_dealloc_fmr - Deallocates a fast memory region. 4251 * @fmr: The fast memory region to deallocate. 4252 */ 4253 int ib_dealloc_fmr(struct ib_fmr *fmr); 4254 4255 /** 4256 * ib_attach_mcast - Attaches the specified QP to a multicast group. 4257 * @qp: QP to attach to the multicast group. The QP must be type 4258 * IB_QPT_UD. 4259 * @gid: Multicast group GID. 4260 * @lid: Multicast group LID in host byte order. 4261 * 4262 * In order to send and receive multicast packets, subnet 4263 * administration must have created the multicast group and configured 4264 * the fabric appropriately. The port associated with the specified 4265 * QP must also be a member of the multicast group. 4266 */ 4267 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 4268 4269 /** 4270 * ib_detach_mcast - Detaches the specified QP from a multicast group. 4271 * @qp: QP to detach from the multicast group. 4272 * @gid: Multicast group GID. 4273 * @lid: Multicast group LID in host byte order. 4274 */ 4275 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 4276 4277 /** 4278 * ib_alloc_xrcd - Allocates an XRC domain. 4279 * @device: The device on which to allocate the XRC domain. 4280 * @caller: Module name for kernel consumers 4281 */ 4282 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller); 4283 #define ib_alloc_xrcd(device) \ 4284 __ib_alloc_xrcd((device), KBUILD_MODNAME) 4285 4286 /** 4287 * ib_dealloc_xrcd - Deallocates an XRC domain. 4288 * @xrcd: The XRC domain to deallocate. 4289 * @udata: Valid user data or NULL for kernel object 4290 */ 4291 int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); 4292 4293 static inline int ib_check_mr_access(int flags) 4294 { 4295 /* 4296 * Local write permission is required if remote write or 4297 * remote atomic permission is also requested. 4298 */ 4299 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 4300 !(flags & IB_ACCESS_LOCAL_WRITE)) 4301 return -EINVAL; 4302 4303 return 0; 4304 } 4305 4306 static inline bool ib_access_writable(int access_flags) 4307 { 4308 /* 4309 * We have writable memory backing the MR if any of the following 4310 * access flags are set. "Local write" and "remote write" obviously 4311 * require write access. "Remote atomic" can do things like fetch and 4312 * add, which will modify memory, and "MW bind" can change permissions 4313 * by binding a window. 4314 */ 4315 return access_flags & 4316 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | 4317 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND); 4318 } 4319 4320 /** 4321 * ib_check_mr_status: lightweight check of MR status. 4322 * This routine may provide status checks on a selected 4323 * ib_mr. first use is for signature status check. 4324 * 4325 * @mr: A memory region. 4326 * @check_mask: Bitmask of which checks to perform from 4327 * ib_mr_status_check enumeration. 4328 * @mr_status: The container of relevant status checks. 4329 * failed checks will be indicated in the status bitmask 4330 * and the relevant info shall be in the error item. 4331 */ 4332 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 4333 struct ib_mr_status *mr_status); 4334 4335 /** 4336 * ib_device_try_get: Hold a registration lock 4337 * device: The device to lock 4338 * 4339 * A device under an active registration lock cannot become unregistered. 
It 4340 * is only possible to obtain a registration lock on a device that is fully 4341 * registered, otherwise this function returns false. 4342 * 4343 * The registration lock is only necessary for actions which require the 4344 * device to still be registered. Uses that only require the device pointer to 4345 * be valid should use get_device(&ibdev->dev) to hold the memory. 4346 * 4347 */ 4348 static inline bool ib_device_try_get(struct ib_device *dev) 4349 { 4350 return refcount_inc_not_zero(&dev->refcount); 4351 } 4352 4353 void ib_device_put(struct ib_device *device); 4354 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, 4355 enum rdma_driver_id driver_id); 4356 struct ib_device *ib_device_get_by_name(const char *name, 4357 enum rdma_driver_id driver_id); 4358 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 4359 u16 pkey, const union ib_gid *gid, 4360 const struct sockaddr *addr); 4361 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 4362 unsigned int port); 4363 struct net_device *ib_device_netdev(struct ib_device *dev, u8 port); 4364 4365 struct ib_wq *ib_create_wq(struct ib_pd *pd, 4366 struct ib_wq_init_attr *init_attr); 4367 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); 4368 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 4369 u32 wq_attr_mask); 4370 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 4371 struct ib_rwq_ind_table_init_attr* 4372 wq_ind_table_init_attr); 4373 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 4374 4375 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 4376 unsigned int *sg_offset, unsigned int page_size); 4377 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, 4378 int data_sg_nents, unsigned int *data_sg_offset, 4379 struct scatterlist *meta_sg, int meta_sg_nents, 4380 unsigned int *meta_sg_offset, unsigned int page_size); 4381 4382 static inline int 4383 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 4384 unsigned int *sg_offset, unsigned int page_size) 4385 { 4386 int n; 4387 4388 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 4389 mr->iova = 0; 4390 4391 return n; 4392 } 4393 4394 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 4395 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 4396 4397 void ib_drain_rq(struct ib_qp *qp); 4398 void ib_drain_sq(struct ib_qp *qp); 4399 void ib_drain_qp(struct ib_qp *qp); 4400 4401 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width); 4402 4403 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) 4404 { 4405 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE) 4406 return attr->roce.dmac; 4407 return NULL; 4408 } 4409 4410 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid) 4411 { 4412 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4413 attr->ib.dlid = (u16)dlid; 4414 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4415 attr->opa.dlid = dlid; 4416 } 4417 4418 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr) 4419 { 4420 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4421 return attr->ib.dlid; 4422 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4423 return attr->opa.dlid; 4424 return 0; 4425 } 4426 4427 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl) 4428 { 4429 attr->sl = sl; 4430 } 4431 4432 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr) 4433 { 4434 return 
attr->sl; 4435 } 4436 4437 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr, 4438 u8 src_path_bits) 4439 { 4440 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4441 attr->ib.src_path_bits = src_path_bits; 4442 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4443 attr->opa.src_path_bits = src_path_bits; 4444 } 4445 4446 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr) 4447 { 4448 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4449 return attr->ib.src_path_bits; 4450 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4451 return attr->opa.src_path_bits; 4452 return 0; 4453 } 4454 4455 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr, 4456 bool make_grd) 4457 { 4458 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4459 attr->opa.make_grd = make_grd; 4460 } 4461 4462 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr) 4463 { 4464 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4465 return attr->opa.make_grd; 4466 return false; 4467 } 4468 4469 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num) 4470 { 4471 attr->port_num = port_num; 4472 } 4473 4474 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) 4475 { 4476 return attr->port_num; 4477 } 4478 4479 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr, 4480 u8 static_rate) 4481 { 4482 attr->static_rate = static_rate; 4483 } 4484 4485 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr) 4486 { 4487 return attr->static_rate; 4488 } 4489 4490 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr, 4491 enum ib_ah_flags flag) 4492 { 4493 attr->ah_flags = flag; 4494 } 4495 4496 static inline enum ib_ah_flags 4497 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr) 4498 { 4499 return attr->ah_flags; 4500 } 4501 4502 static inline const struct ib_global_route 4503 *rdma_ah_read_grh(const struct rdma_ah_attr *attr) 4504 { 4505 return &attr->grh; 4506 } 4507 4508 /*To retrieve and modify the grh */ 4509 static inline struct ib_global_route 4510 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr) 4511 { 4512 return &attr->grh; 4513 } 4514 4515 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid) 4516 { 4517 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4518 4519 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid)); 4520 } 4521 4522 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr, 4523 __be64 prefix) 4524 { 4525 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4526 4527 grh->dgid.global.subnet_prefix = prefix; 4528 } 4529 4530 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr, 4531 __be64 if_id) 4532 { 4533 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4534 4535 grh->dgid.global.interface_id = if_id; 4536 } 4537 4538 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr, 4539 union ib_gid *dgid, u32 flow_label, 4540 u8 sgid_index, u8 hop_limit, 4541 u8 traffic_class) 4542 { 4543 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4544 4545 attr->ah_flags = IB_AH_GRH; 4546 if (dgid) 4547 grh->dgid = *dgid; 4548 grh->flow_label = flow_label; 4549 grh->sgid_index = sgid_index; 4550 grh->hop_limit = hop_limit; 4551 grh->traffic_class = traffic_class; 4552 grh->sgid_attr = NULL; 4553 } 4554 4555 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr); 4556 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid, 4557 u32 flow_label, u8 hop_limit, u8 traffic_class, 4558 const struct ib_gid_attr 
			     *sgid_attr);
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src);
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new);
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 * In the current implementation the only way to get the 32bit LID is from
 * other sources for OPA. For IB, LIDs will always be 16 bits, so cast the
 * value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 * vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns the cpu map of the completion vector, or NULL on failure or if
 * the device driver doesn't implement get_vector_affinity.
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ibdev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);

struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *));

int rdma_init_netdev(struct ib_device *device, u8 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev);

/**
 * rdma_set_device_sysfs_group - Set the device attributes group used to
 *				 create driver-specific sysfs entries for
 *				 the infiniband class.
 *
 * @dev: device pointer for which the attributes are to be created
 * @group: Pointer to the group which should be added when the device
 * is registered with sysfs.
 * rdma_set_device_sysfs_group() allows existing drivers to expose one
 * group per device to have sysfs attributes.
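 *
 * A hedged sketch (illustrative only; the attribute names are hypothetical
 * and the group must be assigned before the device is registered):
 *
 *	static struct attribute *hw_attrs[] = {
 *		&dev_attr_hw_rev.attr,	// from a hypothetical DEVICE_ATTR_RO(hw_rev)
 *		NULL,
 *	};
 *	static const struct attribute_group hw_attr_group = {
 *		.attrs = hw_attrs,
 *	};
 *
 *	rdma_set_device_sysfs_group(ibdev, &hw_attr_group);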
4665 * 4666 * NOTE: New drivers should not make use of this API; instead new device 4667 * parameter should be exposed via netlink command. This API and mechanism 4668 * exist only for existing drivers. 4669 */ 4670 static inline void 4671 rdma_set_device_sysfs_group(struct ib_device *dev, 4672 const struct attribute_group *group) 4673 { 4674 dev->groups[1] = group; 4675 } 4676 4677 /** 4678 * rdma_device_to_ibdev - Get ib_device pointer from device pointer 4679 * 4680 * @device: device pointer for which ib_device pointer to retrieve 4681 * 4682 * rdma_device_to_ibdev() retrieves ib_device pointer from device. 4683 * 4684 */ 4685 static inline struct ib_device *rdma_device_to_ibdev(struct device *device) 4686 { 4687 struct ib_core_device *coredev = 4688 container_of(device, struct ib_core_device, dev); 4689 4690 return coredev->owner; 4691 } 4692 4693 /** 4694 * rdma_device_to_drv_device - Helper macro to reach back to driver's 4695 * ib_device holder structure from device pointer. 4696 * 4697 * NOTE: New drivers should not make use of this API; This API is only for 4698 * existing drivers who have exposed sysfs entries using 4699 * rdma_set_device_sysfs_group(). 4700 */ 4701 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \ 4702 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member) 4703 4704 bool rdma_dev_access_netns(const struct ib_device *device, 4705 const struct net *net); 4706 #endif /* IB_VERBS_H */ 4707