1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #if !defined(IB_VERBS_H) 40 #define IB_VERBS_H 41 42 #include <linux/types.h> 43 #include <linux/device.h> 44 #include <linux/mm.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/kref.h> 47 #include <linux/list.h> 48 #include <linux/rwsem.h> 49 #include <linux/scatterlist.h> 50 #include <linux/workqueue.h> 51 #include <linux/socket.h> 52 #include <linux/irq_poll.h> 53 #include <uapi/linux/if_ether.h> 54 #include <net/ipv6.h> 55 #include <net/ip.h> 56 #include <linux/string.h> 57 #include <linux/slab.h> 58 #include <linux/netdevice.h> 59 60 #include <linux/if_link.h> 61 #include <linux/atomic.h> 62 #include <linux/mmu_notifier.h> 63 #include <linux/uaccess.h> 64 #include <linux/cgroup_rdma.h> 65 #include <uapi/rdma/ib_user_verbs.h> 66 #include <rdma/restrack.h> 67 #include <uapi/rdma/rdma_user_ioctl.h> 68 #include <uapi/rdma/ib_user_ioctl_verbs.h> 69 70 #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN 71 72 extern struct workqueue_struct *ib_wq; 73 extern struct workqueue_struct *ib_comp_wq; 74 75 union ib_gid { 76 u8 raw[16]; 77 struct { 78 __be64 subnet_prefix; 79 __be64 interface_id; 80 } global; 81 }; 82 83 extern union ib_gid zgid; 84 85 enum ib_gid_type { 86 /* If link layer is Ethernet, this is RoCE V1 */ 87 IB_GID_TYPE_IB = 0, 88 IB_GID_TYPE_ROCE = 0, 89 IB_GID_TYPE_ROCE_UDP_ENCAP = 1, 90 IB_GID_TYPE_SIZE 91 }; 92 93 #define ROCE_V2_UDP_DPORT 4791 94 struct ib_gid_attr { 95 struct net_device *ndev; 96 struct ib_device *device; 97 enum ib_gid_type gid_type; 98 u16 index; 99 u8 port_num; 100 }; 101 102 enum rdma_node_type { 103 /* IB values map to NodeInfo:NodeType. 
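 *
 * Editor's illustrative sketch (not part of the original header; the helper
 * name is hypothetical): a consumer can derive the transport from the node
 * type with rdma_node_get_transport(), declared further below, e.g.
 *
 *	static bool node_uses_iwarp_cm(enum rdma_node_type node_type)
 *	{
 *		return rdma_node_get_transport(node_type) ==
 *		       RDMA_TRANSPORT_IWARP;
 *	}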
*/ 104 RDMA_NODE_IB_CA = 1, 105 RDMA_NODE_IB_SWITCH, 106 RDMA_NODE_IB_ROUTER, 107 RDMA_NODE_RNIC, 108 RDMA_NODE_USNIC, 109 RDMA_NODE_USNIC_UDP, 110 }; 111 112 enum { 113 /* set the local administered indication */ 114 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, 115 }; 116 117 enum rdma_transport_type { 118 RDMA_TRANSPORT_IB, 119 RDMA_TRANSPORT_IWARP, 120 RDMA_TRANSPORT_USNIC, 121 RDMA_TRANSPORT_USNIC_UDP 122 }; 123 124 enum rdma_protocol_type { 125 RDMA_PROTOCOL_IB, 126 RDMA_PROTOCOL_IBOE, 127 RDMA_PROTOCOL_IWARP, 128 RDMA_PROTOCOL_USNIC_UDP 129 }; 130 131 __attribute_const__ enum rdma_transport_type 132 rdma_node_get_transport(enum rdma_node_type node_type); 133 134 enum rdma_network_type { 135 RDMA_NETWORK_IB, 136 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, 137 RDMA_NETWORK_IPV4, 138 RDMA_NETWORK_IPV6 139 }; 140 141 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type) 142 { 143 if (network_type == RDMA_NETWORK_IPV4 || 144 network_type == RDMA_NETWORK_IPV6) 145 return IB_GID_TYPE_ROCE_UDP_ENCAP; 146 147 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ 148 return IB_GID_TYPE_IB; 149 } 150 151 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type, 152 union ib_gid *gid) 153 { 154 if (gid_type == IB_GID_TYPE_IB) 155 return RDMA_NETWORK_IB; 156 157 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) 158 return RDMA_NETWORK_IPV4; 159 else 160 return RDMA_NETWORK_IPV6; 161 } 162 163 enum rdma_link_layer { 164 IB_LINK_LAYER_UNSPECIFIED, 165 IB_LINK_LAYER_INFINIBAND, 166 IB_LINK_LAYER_ETHERNET, 167 }; 168 169 enum ib_device_cap_flags { 170 IB_DEVICE_RESIZE_MAX_WR = (1 << 0), 171 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1), 172 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2), 173 IB_DEVICE_RAW_MULTI = (1 << 3), 174 IB_DEVICE_AUTO_PATH_MIG = (1 << 4), 175 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5), 176 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), 177 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), 178 IB_DEVICE_SHUTDOWN_PORT = (1 << 8), 179 /* Not in use, former INIT_TYPE = (1 << 9),*/ 180 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), 181 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), 182 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), 183 IB_DEVICE_SRQ_RESIZE = (1 << 13), 184 IB_DEVICE_N_NOTIFY_CQ = (1 << 14), 185 186 /* 187 * This device supports a per-device lkey or stag that can be 188 * used without performing a memory registration for the local 189 * memory. Note that ULPs should never check this flag, but 190 * instead of use the local_dma_lkey flag in the ib_pd structure, 191 * which will always contain a usable lkey. 192 */ 193 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), 194 /* Reserved, old SEND_W_INV = (1 << 16),*/ 195 IB_DEVICE_MEM_WINDOW = (1 << 17), 196 /* 197 * Devices should set IB_DEVICE_UD_IP_SUM if they support 198 * insertion of UDP and TCP checksum on outgoing UD IPoIB 199 * messages and can verify the validity of checksum for 200 * incoming messages. Setting this flag implies that the 201 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 202 */ 203 IB_DEVICE_UD_IP_CSUM = (1 << 18), 204 IB_DEVICE_UD_TSO = (1 << 19), 205 IB_DEVICE_XRC = (1 << 20), 206 207 /* 208 * This device supports the IB "base memory management extension", 209 * which includes support for fast registrations (IB_WR_REG_MR, 210 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should 211 * also be set by any iWarp device which must support FRs to comply 212 * to the iWarp verbs spec. iWarp devices also support the 213 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the 214 * stag. 
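 *
 * Editor's hedged sketch (not part of the original header; the helper name
 * is hypothetical): a ULP that wants to use IB_WR_REG_MR fast registration
 * can test the capability bit defined just below against the queried device
 * attributes, e.g.
 *
 *	static bool dev_supports_fast_reg(const struct ib_device_attr *attr)
 *	{
 *		return attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS;
 *	}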
215 */ 216 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), 217 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), 218 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), 219 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), 220 IB_DEVICE_RC_IP_CSUM = (1 << 25), 221 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */ 222 IB_DEVICE_RAW_IP_CSUM = (1 << 26), 223 /* 224 * Devices should set IB_DEVICE_CROSS_CHANNEL if they 225 * support execution of WQEs that involve synchronization 226 * of I/O operations with single completion queue managed 227 * by hardware. 228 */ 229 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 230 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 231 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 232 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), 233 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 234 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), 235 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ 236 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), 237 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35), 238 /* The device supports padding incoming writes to cacheline. */ 239 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36), 240 }; 241 242 enum ib_signature_prot_cap { 243 IB_PROT_T10DIF_TYPE_1 = 1, 244 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 245 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 246 }; 247 248 enum ib_signature_guard_cap { 249 IB_GUARD_T10DIF_CRC = 1, 250 IB_GUARD_T10DIF_CSUM = 1 << 1, 251 }; 252 253 enum ib_atomic_cap { 254 IB_ATOMIC_NONE, 255 IB_ATOMIC_HCA, 256 IB_ATOMIC_GLOB 257 }; 258 259 enum ib_odp_general_cap_bits { 260 IB_ODP_SUPPORT = 1 << 0, 261 IB_ODP_SUPPORT_IMPLICIT = 1 << 1, 262 }; 263 264 enum ib_odp_transport_cap_bits { 265 IB_ODP_SUPPORT_SEND = 1 << 0, 266 IB_ODP_SUPPORT_RECV = 1 << 1, 267 IB_ODP_SUPPORT_WRITE = 1 << 2, 268 IB_ODP_SUPPORT_READ = 1 << 3, 269 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 270 }; 271 272 struct ib_odp_caps { 273 uint64_t general_caps; 274 struct { 275 uint32_t rc_odp_caps; 276 uint32_t uc_odp_caps; 277 uint32_t ud_odp_caps; 278 } per_transport_caps; 279 }; 280 281 struct ib_rss_caps { 282 /* Corresponding bit will be set if qp type from 283 * 'enum ib_qp_type' is supported, e.g. 
284 * supported_qpts |= 1 << IB_QPT_UD 285 */ 286 u32 supported_qpts; 287 u32 max_rwq_indirection_tables; 288 u32 max_rwq_indirection_table_size; 289 }; 290 291 enum ib_tm_cap_flags { 292 /* Support tag matching on RC transport */ 293 IB_TM_CAP_RC = 1 << 0, 294 }; 295 296 struct ib_tm_caps { 297 /* Max size of RNDV header */ 298 u32 max_rndv_hdr_size; 299 /* Max number of entries in tag matching list */ 300 u32 max_num_tags; 301 /* From enum ib_tm_cap_flags */ 302 u32 flags; 303 /* Max number of outstanding list operations */ 304 u32 max_ops; 305 /* Max number of SGE in tag matching entry */ 306 u32 max_sge; 307 }; 308 309 struct ib_cq_init_attr { 310 unsigned int cqe; 311 int comp_vector; 312 u32 flags; 313 }; 314 315 enum ib_cq_attr_mask { 316 IB_CQ_MODERATE = 1 << 0, 317 }; 318 319 struct ib_cq_caps { 320 u16 max_cq_moderation_count; 321 u16 max_cq_moderation_period; 322 }; 323 324 struct ib_dm_mr_attr { 325 u64 length; 326 u64 offset; 327 u32 access_flags; 328 }; 329 330 struct ib_dm_alloc_attr { 331 u64 length; 332 u32 alignment; 333 u32 flags; 334 }; 335 336 struct ib_device_attr { 337 u64 fw_ver; 338 __be64 sys_image_guid; 339 u64 max_mr_size; 340 u64 page_size_cap; 341 u32 vendor_id; 342 u32 vendor_part_id; 343 u32 hw_ver; 344 int max_qp; 345 int max_qp_wr; 346 u64 device_cap_flags; 347 int max_sge; 348 int max_sge_rd; 349 int max_cq; 350 int max_cqe; 351 int max_mr; 352 int max_pd; 353 int max_qp_rd_atom; 354 int max_ee_rd_atom; 355 int max_res_rd_atom; 356 int max_qp_init_rd_atom; 357 int max_ee_init_rd_atom; 358 enum ib_atomic_cap atomic_cap; 359 enum ib_atomic_cap masked_atomic_cap; 360 int max_ee; 361 int max_rdd; 362 int max_mw; 363 int max_raw_ipv6_qp; 364 int max_raw_ethy_qp; 365 int max_mcast_grp; 366 int max_mcast_qp_attach; 367 int max_total_mcast_qp_attach; 368 int max_ah; 369 int max_fmr; 370 int max_map_per_fmr; 371 int max_srq; 372 int max_srq_wr; 373 int max_srq_sge; 374 unsigned int max_fast_reg_page_list_len; 375 u16 max_pkeys; 376 u8 local_ca_ack_delay; 377 int sig_prot_cap; 378 int sig_guard_cap; 379 struct ib_odp_caps odp_caps; 380 uint64_t timestamp_mask; 381 uint64_t hca_core_clock; /* in KHZ */ 382 struct ib_rss_caps rss_caps; 383 u32 max_wq_type_rq; 384 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ 385 struct ib_tm_caps tm_caps; 386 struct ib_cq_caps cq_caps; 387 u64 max_dm_size; 388 }; 389 390 enum ib_mtu { 391 IB_MTU_256 = 1, 392 IB_MTU_512 = 2, 393 IB_MTU_1024 = 3, 394 IB_MTU_2048 = 4, 395 IB_MTU_4096 = 5 396 }; 397 398 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 399 { 400 switch (mtu) { 401 case IB_MTU_256: return 256; 402 case IB_MTU_512: return 512; 403 case IB_MTU_1024: return 1024; 404 case IB_MTU_2048: return 2048; 405 case IB_MTU_4096: return 4096; 406 default: return -1; 407 } 408 } 409 410 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu) 411 { 412 if (mtu >= 4096) 413 return IB_MTU_4096; 414 else if (mtu >= 2048) 415 return IB_MTU_2048; 416 else if (mtu >= 1024) 417 return IB_MTU_1024; 418 else if (mtu >= 512) 419 return IB_MTU_512; 420 else 421 return IB_MTU_256; 422 } 423 424 enum ib_port_state { 425 IB_PORT_NOP = 0, 426 IB_PORT_DOWN = 1, 427 IB_PORT_INIT = 2, 428 IB_PORT_ARMED = 3, 429 IB_PORT_ACTIVE = 4, 430 IB_PORT_ACTIVE_DEFER = 5 431 }; 432 433 enum ib_port_cap_flags { 434 IB_PORT_SM = 1 << 1, 435 IB_PORT_NOTICE_SUP = 1 << 2, 436 IB_PORT_TRAP_SUP = 1 << 3, 437 IB_PORT_OPT_IPD_SUP = 1 << 4, 438 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 439 IB_PORT_SL_MAP_SUP = 1 << 6, 440 IB_PORT_MKEY_NVRAM = 1 << 7, 441 IB_PORT_PKEY_NVRAM = 
					   1 << 8,
	IB_PORT_LED_INFO_SUP			= 1 << 9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64 bits and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If @names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}


/* Define bits for the various functionality this port needs to be supported by
 * the core.
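 *
 * Editor's hedged sketch (not part of the original header): a RoCE driver
 * would typically advertise one of the RDMA_CORE_PORT_* combinations defined
 * below from its port-immutable callback; the function name and the table
 * lengths here are hypothetical example values.
 *
 *	static int example_get_port_immutable(struct ib_device *ibdev,
 *					      u8 port_num,
 *					      struct ib_port_immutable *immutable)
 *	{
 *		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
 *		immutable->gid_tbl_len	  = 16;
 *		immutable->pkey_tbl_len	  = 1;
 *		return 0;
 *	}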
544 */ 545 /* Management 0x00000FFF */ 546 #define RDMA_CORE_CAP_IB_MAD 0x00000001 547 #define RDMA_CORE_CAP_IB_SMI 0x00000002 548 #define RDMA_CORE_CAP_IB_CM 0x00000004 549 #define RDMA_CORE_CAP_IW_CM 0x00000008 550 #define RDMA_CORE_CAP_IB_SA 0x00000010 551 #define RDMA_CORE_CAP_OPA_MAD 0x00000020 552 553 /* Address format 0x000FF000 */ 554 #define RDMA_CORE_CAP_AF_IB 0x00001000 555 #define RDMA_CORE_CAP_ETH_AH 0x00002000 556 #define RDMA_CORE_CAP_OPA_AH 0x00004000 557 558 /* Protocol 0xFFF00000 */ 559 #define RDMA_CORE_CAP_PROT_IB 0x00100000 560 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 561 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 562 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 563 #define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000 564 #define RDMA_CORE_CAP_PROT_USNIC 0x02000000 565 566 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 567 | RDMA_CORE_CAP_IB_MAD \ 568 | RDMA_CORE_CAP_IB_SMI \ 569 | RDMA_CORE_CAP_IB_CM \ 570 | RDMA_CORE_CAP_IB_SA \ 571 | RDMA_CORE_CAP_AF_IB) 572 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 573 | RDMA_CORE_CAP_IB_MAD \ 574 | RDMA_CORE_CAP_IB_CM \ 575 | RDMA_CORE_CAP_AF_IB \ 576 | RDMA_CORE_CAP_ETH_AH) 577 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ 578 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ 579 | RDMA_CORE_CAP_IB_MAD \ 580 | RDMA_CORE_CAP_IB_CM \ 581 | RDMA_CORE_CAP_AF_IB \ 582 | RDMA_CORE_CAP_ETH_AH) 583 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 584 | RDMA_CORE_CAP_IW_CM) 585 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 586 | RDMA_CORE_CAP_OPA_MAD) 587 588 #define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET) 589 590 #define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC) 591 592 struct ib_port_attr { 593 u64 subnet_prefix; 594 enum ib_port_state state; 595 enum ib_mtu max_mtu; 596 enum ib_mtu active_mtu; 597 int gid_tbl_len; 598 u32 port_cap_flags; 599 u32 max_msg_sz; 600 u32 bad_pkey_cntr; 601 u32 qkey_viol_cntr; 602 u16 pkey_tbl_len; 603 u32 sm_lid; 604 u32 lid; 605 u8 lmc; 606 u8 max_vl_num; 607 u8 sm_sl; 608 u8 subnet_timeout; 609 u8 init_type_reply; 610 u8 active_width; 611 u8 active_speed; 612 u8 phys_state; 613 bool grh_required; 614 }; 615 616 enum ib_device_modify_flags { 617 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 618 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 619 }; 620 621 #define IB_DEVICE_NODE_DESC_MAX 64 622 623 struct ib_device_modify { 624 u64 sys_image_guid; 625 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 626 }; 627 628 enum ib_port_modify_flags { 629 IB_PORT_SHUTDOWN = 1, 630 IB_PORT_INIT_TYPE = (1<<2), 631 IB_PORT_RESET_QKEY_CNTR = (1<<3), 632 IB_PORT_OPA_MASK_CHG = (1<<4) 633 }; 634 635 struct ib_port_modify { 636 u32 set_port_cap_mask; 637 u32 clr_port_cap_mask; 638 u8 init_type; 639 }; 640 641 enum ib_event_type { 642 IB_EVENT_CQ_ERR, 643 IB_EVENT_QP_FATAL, 644 IB_EVENT_QP_REQ_ERR, 645 IB_EVENT_QP_ACCESS_ERR, 646 IB_EVENT_COMM_EST, 647 IB_EVENT_SQ_DRAINED, 648 IB_EVENT_PATH_MIG, 649 IB_EVENT_PATH_MIG_ERR, 650 IB_EVENT_DEVICE_FATAL, 651 IB_EVENT_PORT_ACTIVE, 652 IB_EVENT_PORT_ERR, 653 IB_EVENT_LID_CHANGE, 654 IB_EVENT_PKEY_CHANGE, 655 IB_EVENT_SM_CHANGE, 656 IB_EVENT_SRQ_ERR, 657 IB_EVENT_SRQ_LIMIT_REACHED, 658 IB_EVENT_QP_LAST_WQE_REACHED, 659 IB_EVENT_CLIENT_REREGISTER, 660 IB_EVENT_GID_CHANGE, 661 IB_EVENT_WQ_FATAL, 662 }; 663 664 const char *__attribute_const__ ib_event_msg(enum ib_event_type event); 665 666 struct ib_event { 667 struct ib_device *device; 668 union { 669 struct ib_cq *cq; 670 struct ib_qp *qp; 671 struct ib_srq *srq; 672 struct ib_wq *wq; 673 u8 
port_num; 674 } element; 675 enum ib_event_type event; 676 }; 677 678 struct ib_event_handler { 679 struct ib_device *device; 680 void (*handler)(struct ib_event_handler *, struct ib_event *); 681 struct list_head list; 682 }; 683 684 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 685 do { \ 686 (_ptr)->device = _device; \ 687 (_ptr)->handler = _handler; \ 688 INIT_LIST_HEAD(&(_ptr)->list); \ 689 } while (0) 690 691 struct ib_global_route { 692 union ib_gid dgid; 693 u32 flow_label; 694 u8 sgid_index; 695 u8 hop_limit; 696 u8 traffic_class; 697 }; 698 699 struct ib_grh { 700 __be32 version_tclass_flow; 701 __be16 paylen; 702 u8 next_hdr; 703 u8 hop_limit; 704 union ib_gid sgid; 705 union ib_gid dgid; 706 }; 707 708 union rdma_network_hdr { 709 struct ib_grh ibgrh; 710 struct { 711 /* The IB spec states that if it's IPv4, the header 712 * is located in the last 20 bytes of the header. 713 */ 714 u8 reserved[20]; 715 struct iphdr roce4grh; 716 }; 717 }; 718 719 #define IB_QPN_MASK 0xFFFFFF 720 721 enum { 722 IB_MULTICAST_QPN = 0xffffff 723 }; 724 725 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 726 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) 727 728 enum ib_ah_flags { 729 IB_AH_GRH = 1 730 }; 731 732 enum ib_rate { 733 IB_RATE_PORT_CURRENT = 0, 734 IB_RATE_2_5_GBPS = 2, 735 IB_RATE_5_GBPS = 5, 736 IB_RATE_10_GBPS = 3, 737 IB_RATE_20_GBPS = 6, 738 IB_RATE_30_GBPS = 4, 739 IB_RATE_40_GBPS = 7, 740 IB_RATE_60_GBPS = 8, 741 IB_RATE_80_GBPS = 9, 742 IB_RATE_120_GBPS = 10, 743 IB_RATE_14_GBPS = 11, 744 IB_RATE_56_GBPS = 12, 745 IB_RATE_112_GBPS = 13, 746 IB_RATE_168_GBPS = 14, 747 IB_RATE_25_GBPS = 15, 748 IB_RATE_100_GBPS = 16, 749 IB_RATE_200_GBPS = 17, 750 IB_RATE_300_GBPS = 18 751 }; 752 753 /** 754 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 755 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 756 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 757 * @rate: rate to convert. 758 */ 759 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 760 761 /** 762 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 763 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 764 * @rate: rate to convert. 765 */ 766 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 767 768 769 /** 770 * enum ib_mr_type - memory region type 771 * @IB_MR_TYPE_MEM_REG: memory region that is used for 772 * normal registration 773 * @IB_MR_TYPE_SIGNATURE: memory region that is used for 774 * signature operations (data-integrity 775 * capable regions) 776 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to 777 * register any arbitrary sg lists (without 778 * the normal mr constraints - see 779 * ib_map_mr_sg) 780 */ 781 enum ib_mr_type { 782 IB_MR_TYPE_MEM_REG, 783 IB_MR_TYPE_SIGNATURE, 784 IB_MR_TYPE_SG_GAPS, 785 }; 786 787 /** 788 * Signature types 789 * IB_SIG_TYPE_NONE: Unprotected. 790 * IB_SIG_TYPE_T10_DIF: Type T10-DIF 791 */ 792 enum ib_signature_type { 793 IB_SIG_TYPE_NONE, 794 IB_SIG_TYPE_T10_DIF, 795 }; 796 797 /** 798 * Signature T10-DIF block-guard types 799 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. 800 * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 801 */ 802 enum ib_t10_dif_bg_type { 803 IB_T10DIF_CRC, 804 IB_T10DIF_CSUM 805 }; 806 807 /** 808 * struct ib_t10_dif_domain - Parameters specific for T10-DIF 809 * domain. 810 * @bg_type: T10-DIF block guard type (CRC|CSUM) 811 * @pi_interval: protection information interval. 812 * @bg: seed of guard computation. 
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * Signature check masks (8 bytes in total) according to the T10-PI standard:
 *  -------- -------- ------------
 * | GUARD  | APPTAG |   REFTAG   |
 * |   2B   |   2B   |     4B     |
 *  -------- -------- ------------
 */
enum {
	IB_SIG_CHECK_GUARD	= 0xc0,
	IB_SIG_CHECK_APPTAG	= 0x30,
	IB_SIG_CHECK_REFTAG	= 0x0f,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
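 *
 * For example (editor's note, consistent with the 2.5 Gbit/sec base rate
 * documented above): ib_rate_to_mult(IB_RATE_10_GBPS) returns 4, and
 * mult_to_ib_rate(4) returns IB_RATE_10_GBPS.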
908 */ 909 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); 910 911 enum rdma_ah_attr_type { 912 RDMA_AH_ATTR_TYPE_UNDEFINED, 913 RDMA_AH_ATTR_TYPE_IB, 914 RDMA_AH_ATTR_TYPE_ROCE, 915 RDMA_AH_ATTR_TYPE_OPA, 916 }; 917 918 struct ib_ah_attr { 919 u16 dlid; 920 u8 src_path_bits; 921 }; 922 923 struct roce_ah_attr { 924 u8 dmac[ETH_ALEN]; 925 }; 926 927 struct opa_ah_attr { 928 u32 dlid; 929 u8 src_path_bits; 930 bool make_grd; 931 }; 932 933 struct rdma_ah_attr { 934 struct ib_global_route grh; 935 u8 sl; 936 u8 static_rate; 937 u8 port_num; 938 u8 ah_flags; 939 enum rdma_ah_attr_type type; 940 union { 941 struct ib_ah_attr ib; 942 struct roce_ah_attr roce; 943 struct opa_ah_attr opa; 944 }; 945 }; 946 947 enum ib_wc_status { 948 IB_WC_SUCCESS, 949 IB_WC_LOC_LEN_ERR, 950 IB_WC_LOC_QP_OP_ERR, 951 IB_WC_LOC_EEC_OP_ERR, 952 IB_WC_LOC_PROT_ERR, 953 IB_WC_WR_FLUSH_ERR, 954 IB_WC_MW_BIND_ERR, 955 IB_WC_BAD_RESP_ERR, 956 IB_WC_LOC_ACCESS_ERR, 957 IB_WC_REM_INV_REQ_ERR, 958 IB_WC_REM_ACCESS_ERR, 959 IB_WC_REM_OP_ERR, 960 IB_WC_RETRY_EXC_ERR, 961 IB_WC_RNR_RETRY_EXC_ERR, 962 IB_WC_LOC_RDD_VIOL_ERR, 963 IB_WC_REM_INV_RD_REQ_ERR, 964 IB_WC_REM_ABORT_ERR, 965 IB_WC_INV_EECN_ERR, 966 IB_WC_INV_EEC_STATE_ERR, 967 IB_WC_FATAL_ERR, 968 IB_WC_RESP_TIMEOUT_ERR, 969 IB_WC_GENERAL_ERR 970 }; 971 972 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); 973 974 enum ib_wc_opcode { 975 IB_WC_SEND, 976 IB_WC_RDMA_WRITE, 977 IB_WC_RDMA_READ, 978 IB_WC_COMP_SWAP, 979 IB_WC_FETCH_ADD, 980 IB_WC_LSO, 981 IB_WC_LOCAL_INV, 982 IB_WC_REG_MR, 983 IB_WC_MASKED_COMP_SWAP, 984 IB_WC_MASKED_FETCH_ADD, 985 /* 986 * Set value of IB_WC_RECV so consumers can test if a completion is a 987 * receive by testing (opcode & IB_WC_RECV). 988 */ 989 IB_WC_RECV = 1 << 7, 990 IB_WC_RECV_RDMA_WITH_IMM 991 }; 992 993 enum ib_wc_flags { 994 IB_WC_GRH = 1, 995 IB_WC_WITH_IMM = (1<<1), 996 IB_WC_WITH_INVALIDATE = (1<<2), 997 IB_WC_IP_CSUM_OK = (1<<3), 998 IB_WC_WITH_SMAC = (1<<4), 999 IB_WC_WITH_VLAN = (1<<5), 1000 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6), 1001 }; 1002 1003 struct ib_wc { 1004 union { 1005 u64 wr_id; 1006 struct ib_cqe *wr_cqe; 1007 }; 1008 enum ib_wc_status status; 1009 enum ib_wc_opcode opcode; 1010 u32 vendor_err; 1011 u32 byte_len; 1012 struct ib_qp *qp; 1013 union { 1014 __be32 imm_data; 1015 u32 invalidate_rkey; 1016 } ex; 1017 u32 src_qp; 1018 u32 slid; 1019 int wc_flags; 1020 u16 pkey_index; 1021 u8 sl; 1022 u8 dlid_path_bits; 1023 u8 port_num; /* valid only for DR SMPs on switches */ 1024 u8 smac[ETH_ALEN]; 1025 u16 vlan_id; 1026 u8 network_hdr_type; 1027 }; 1028 1029 enum ib_cq_notify_flags { 1030 IB_CQ_SOLICITED = 1 << 0, 1031 IB_CQ_NEXT_COMP = 1 << 1, 1032 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 1033 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 1034 }; 1035 1036 enum ib_srq_type { 1037 IB_SRQT_BASIC, 1038 IB_SRQT_XRC, 1039 IB_SRQT_TM, 1040 }; 1041 1042 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type) 1043 { 1044 return srq_type == IB_SRQT_XRC || 1045 srq_type == IB_SRQT_TM; 1046 } 1047 1048 enum ib_srq_attr_mask { 1049 IB_SRQ_MAX_WR = 1 << 0, 1050 IB_SRQ_LIMIT = 1 << 1, 1051 }; 1052 1053 struct ib_srq_attr { 1054 u32 max_wr; 1055 u32 max_sge; 1056 u32 srq_limit; 1057 }; 1058 1059 struct ib_srq_init_attr { 1060 void (*event_handler)(struct ib_event *, void *); 1061 void *srq_context; 1062 struct ib_srq_attr attr; 1063 enum ib_srq_type srq_type; 1064 1065 struct { 1066 struct ib_cq *cq; 1067 union { 1068 struct { 1069 struct ib_xrcd *xrcd; 1070 } xrc; 1071 1072 struct 
{ 1073 u32 max_num_tags; 1074 } tag_matching; 1075 }; 1076 } ext; 1077 }; 1078 1079 struct ib_qp_cap { 1080 u32 max_send_wr; 1081 u32 max_recv_wr; 1082 u32 max_send_sge; 1083 u32 max_recv_sge; 1084 u32 max_inline_data; 1085 1086 /* 1087 * Maximum number of rdma_rw_ctx structures in flight at a time. 1088 * ib_create_qp() will calculate the right amount of neededed WRs 1089 * and MRs based on this. 1090 */ 1091 u32 max_rdma_ctxs; 1092 }; 1093 1094 enum ib_sig_type { 1095 IB_SIGNAL_ALL_WR, 1096 IB_SIGNAL_REQ_WR 1097 }; 1098 1099 enum ib_qp_type { 1100 /* 1101 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 1102 * here (and in that order) since the MAD layer uses them as 1103 * indices into a 2-entry table. 1104 */ 1105 IB_QPT_SMI, 1106 IB_QPT_GSI, 1107 1108 IB_QPT_RC, 1109 IB_QPT_UC, 1110 IB_QPT_UD, 1111 IB_QPT_RAW_IPV6, 1112 IB_QPT_RAW_ETHERTYPE, 1113 IB_QPT_RAW_PACKET = 8, 1114 IB_QPT_XRC_INI = 9, 1115 IB_QPT_XRC_TGT, 1116 IB_QPT_MAX, 1117 IB_QPT_DRIVER = 0xFF, 1118 /* Reserve a range for qp types internal to the low level driver. 1119 * These qp types will not be visible at the IB core layer, so the 1120 * IB_QPT_MAX usages should not be affected in the core layer 1121 */ 1122 IB_QPT_RESERVED1 = 0x1000, 1123 IB_QPT_RESERVED2, 1124 IB_QPT_RESERVED3, 1125 IB_QPT_RESERVED4, 1126 IB_QPT_RESERVED5, 1127 IB_QPT_RESERVED6, 1128 IB_QPT_RESERVED7, 1129 IB_QPT_RESERVED8, 1130 IB_QPT_RESERVED9, 1131 IB_QPT_RESERVED10, 1132 }; 1133 1134 enum ib_qp_create_flags { 1135 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 1136 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 1137 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, 1138 IB_QP_CREATE_MANAGED_SEND = 1 << 3, 1139 IB_QP_CREATE_MANAGED_RECV = 1 << 4, 1140 IB_QP_CREATE_NETIF_QP = 1 << 5, 1141 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 1142 /* FREE = 1 << 7, */ 1143 IB_QP_CREATE_SCATTER_FCS = 1 << 8, 1144 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, 1145 IB_QP_CREATE_SOURCE_QPN = 1 << 10, 1146 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11, 1147 /* reserve bits 26-31 for low level drivers' internal use */ 1148 IB_QP_CREATE_RESERVED_START = 1 << 26, 1149 IB_QP_CREATE_RESERVED_END = 1 << 31, 1150 }; 1151 1152 /* 1153 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 1154 * callback to destroy the passed in QP. 1155 */ 1156 1157 struct ib_qp_init_attr { 1158 void (*event_handler)(struct ib_event *, void *); 1159 void *qp_context; 1160 struct ib_cq *send_cq; 1161 struct ib_cq *recv_cq; 1162 struct ib_srq *srq; 1163 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1164 struct ib_qp_cap cap; 1165 enum ib_sig_type sq_sig_type; 1166 enum ib_qp_type qp_type; 1167 enum ib_qp_create_flags create_flags; 1168 1169 /* 1170 * Only needed for special QP types, or when using the RW API. 
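 *
 * Editor's hedged sketch (not part of the original header): a ULP typically
 * fills this structure and hands it to the ib_create_qp() verb declared
 * later in this file; the pd, CQ pointers and cap sizes below are
 * hypothetical example values.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *		.cap		= {
 *			.max_send_wr	= 16,
 *			.max_recv_wr	= 16,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);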
1171 */ 1172 u8 port_num; 1173 struct ib_rwq_ind_table *rwq_ind_tbl; 1174 u32 source_qpn; 1175 }; 1176 1177 struct ib_qp_open_attr { 1178 void (*event_handler)(struct ib_event *, void *); 1179 void *qp_context; 1180 u32 qp_num; 1181 enum ib_qp_type qp_type; 1182 }; 1183 1184 enum ib_rnr_timeout { 1185 IB_RNR_TIMER_655_36 = 0, 1186 IB_RNR_TIMER_000_01 = 1, 1187 IB_RNR_TIMER_000_02 = 2, 1188 IB_RNR_TIMER_000_03 = 3, 1189 IB_RNR_TIMER_000_04 = 4, 1190 IB_RNR_TIMER_000_06 = 5, 1191 IB_RNR_TIMER_000_08 = 6, 1192 IB_RNR_TIMER_000_12 = 7, 1193 IB_RNR_TIMER_000_16 = 8, 1194 IB_RNR_TIMER_000_24 = 9, 1195 IB_RNR_TIMER_000_32 = 10, 1196 IB_RNR_TIMER_000_48 = 11, 1197 IB_RNR_TIMER_000_64 = 12, 1198 IB_RNR_TIMER_000_96 = 13, 1199 IB_RNR_TIMER_001_28 = 14, 1200 IB_RNR_TIMER_001_92 = 15, 1201 IB_RNR_TIMER_002_56 = 16, 1202 IB_RNR_TIMER_003_84 = 17, 1203 IB_RNR_TIMER_005_12 = 18, 1204 IB_RNR_TIMER_007_68 = 19, 1205 IB_RNR_TIMER_010_24 = 20, 1206 IB_RNR_TIMER_015_36 = 21, 1207 IB_RNR_TIMER_020_48 = 22, 1208 IB_RNR_TIMER_030_72 = 23, 1209 IB_RNR_TIMER_040_96 = 24, 1210 IB_RNR_TIMER_061_44 = 25, 1211 IB_RNR_TIMER_081_92 = 26, 1212 IB_RNR_TIMER_122_88 = 27, 1213 IB_RNR_TIMER_163_84 = 28, 1214 IB_RNR_TIMER_245_76 = 29, 1215 IB_RNR_TIMER_327_68 = 30, 1216 IB_RNR_TIMER_491_52 = 31 1217 }; 1218 1219 enum ib_qp_attr_mask { 1220 IB_QP_STATE = 1, 1221 IB_QP_CUR_STATE = (1<<1), 1222 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 1223 IB_QP_ACCESS_FLAGS = (1<<3), 1224 IB_QP_PKEY_INDEX = (1<<4), 1225 IB_QP_PORT = (1<<5), 1226 IB_QP_QKEY = (1<<6), 1227 IB_QP_AV = (1<<7), 1228 IB_QP_PATH_MTU = (1<<8), 1229 IB_QP_TIMEOUT = (1<<9), 1230 IB_QP_RETRY_CNT = (1<<10), 1231 IB_QP_RNR_RETRY = (1<<11), 1232 IB_QP_RQ_PSN = (1<<12), 1233 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 1234 IB_QP_ALT_PATH = (1<<14), 1235 IB_QP_MIN_RNR_TIMER = (1<<15), 1236 IB_QP_SQ_PSN = (1<<16), 1237 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 1238 IB_QP_PATH_MIG_STATE = (1<<18), 1239 IB_QP_CAP = (1<<19), 1240 IB_QP_DEST_QPN = (1<<20), 1241 IB_QP_RESERVED1 = (1<<21), 1242 IB_QP_RESERVED2 = (1<<22), 1243 IB_QP_RESERVED3 = (1<<23), 1244 IB_QP_RESERVED4 = (1<<24), 1245 IB_QP_RATE_LIMIT = (1<<25), 1246 }; 1247 1248 enum ib_qp_state { 1249 IB_QPS_RESET, 1250 IB_QPS_INIT, 1251 IB_QPS_RTR, 1252 IB_QPS_RTS, 1253 IB_QPS_SQD, 1254 IB_QPS_SQE, 1255 IB_QPS_ERR 1256 }; 1257 1258 enum ib_mig_state { 1259 IB_MIG_MIGRATED, 1260 IB_MIG_REARM, 1261 IB_MIG_ARMED 1262 }; 1263 1264 enum ib_mw_type { 1265 IB_MW_TYPE_1 = 1, 1266 IB_MW_TYPE_2 = 2 1267 }; 1268 1269 struct ib_qp_attr { 1270 enum ib_qp_state qp_state; 1271 enum ib_qp_state cur_qp_state; 1272 enum ib_mtu path_mtu; 1273 enum ib_mig_state path_mig_state; 1274 u32 qkey; 1275 u32 rq_psn; 1276 u32 sq_psn; 1277 u32 dest_qp_num; 1278 int qp_access_flags; 1279 struct ib_qp_cap cap; 1280 struct rdma_ah_attr ah_attr; 1281 struct rdma_ah_attr alt_ah_attr; 1282 u16 pkey_index; 1283 u16 alt_pkey_index; 1284 u8 en_sqd_async_notify; 1285 u8 sq_draining; 1286 u8 max_rd_atomic; 1287 u8 max_dest_rd_atomic; 1288 u8 min_rnr_timer; 1289 u8 port_num; 1290 u8 timeout; 1291 u8 retry_cnt; 1292 u8 rnr_retry; 1293 u8 alt_port_num; 1294 u8 alt_timeout; 1295 u32 rate_limit; 1296 }; 1297 1298 enum ib_wr_opcode { 1299 IB_WR_RDMA_WRITE, 1300 IB_WR_RDMA_WRITE_WITH_IMM, 1301 IB_WR_SEND, 1302 IB_WR_SEND_WITH_IMM, 1303 IB_WR_RDMA_READ, 1304 IB_WR_ATOMIC_CMP_AND_SWP, 1305 IB_WR_ATOMIC_FETCH_AND_ADD, 1306 IB_WR_LSO, 1307 IB_WR_SEND_WITH_INV, 1308 IB_WR_RDMA_READ_WITH_INV, 1309 IB_WR_LOCAL_INV, 1310 IB_WR_REG_MR, 1311 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1312 
IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1313 IB_WR_REG_SIG_MR, 1314 /* reserve values for low level drivers' internal use. 1315 * These values will not be used at all in the ib core layer. 1316 */ 1317 IB_WR_RESERVED1 = 0xf0, 1318 IB_WR_RESERVED2, 1319 IB_WR_RESERVED3, 1320 IB_WR_RESERVED4, 1321 IB_WR_RESERVED5, 1322 IB_WR_RESERVED6, 1323 IB_WR_RESERVED7, 1324 IB_WR_RESERVED8, 1325 IB_WR_RESERVED9, 1326 IB_WR_RESERVED10, 1327 }; 1328 1329 enum ib_send_flags { 1330 IB_SEND_FENCE = 1, 1331 IB_SEND_SIGNALED = (1<<1), 1332 IB_SEND_SOLICITED = (1<<2), 1333 IB_SEND_INLINE = (1<<3), 1334 IB_SEND_IP_CSUM = (1<<4), 1335 1336 /* reserve bits 26-31 for low level drivers' internal use */ 1337 IB_SEND_RESERVED_START = (1 << 26), 1338 IB_SEND_RESERVED_END = (1 << 31), 1339 }; 1340 1341 struct ib_sge { 1342 u64 addr; 1343 u32 length; 1344 u32 lkey; 1345 }; 1346 1347 struct ib_cqe { 1348 void (*done)(struct ib_cq *cq, struct ib_wc *wc); 1349 }; 1350 1351 struct ib_send_wr { 1352 struct ib_send_wr *next; 1353 union { 1354 u64 wr_id; 1355 struct ib_cqe *wr_cqe; 1356 }; 1357 struct ib_sge *sg_list; 1358 int num_sge; 1359 enum ib_wr_opcode opcode; 1360 int send_flags; 1361 union { 1362 __be32 imm_data; 1363 u32 invalidate_rkey; 1364 } ex; 1365 }; 1366 1367 struct ib_rdma_wr { 1368 struct ib_send_wr wr; 1369 u64 remote_addr; 1370 u32 rkey; 1371 }; 1372 1373 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) 1374 { 1375 return container_of(wr, struct ib_rdma_wr, wr); 1376 } 1377 1378 struct ib_atomic_wr { 1379 struct ib_send_wr wr; 1380 u64 remote_addr; 1381 u64 compare_add; 1382 u64 swap; 1383 u64 compare_add_mask; 1384 u64 swap_mask; 1385 u32 rkey; 1386 }; 1387 1388 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) 1389 { 1390 return container_of(wr, struct ib_atomic_wr, wr); 1391 } 1392 1393 struct ib_ud_wr { 1394 struct ib_send_wr wr; 1395 struct ib_ah *ah; 1396 void *header; 1397 int hlen; 1398 int mss; 1399 u32 remote_qpn; 1400 u32 remote_qkey; 1401 u16 pkey_index; /* valid for GSI only */ 1402 u8 port_num; /* valid for DR SMPs on switch only */ 1403 }; 1404 1405 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) 1406 { 1407 return container_of(wr, struct ib_ud_wr, wr); 1408 } 1409 1410 struct ib_reg_wr { 1411 struct ib_send_wr wr; 1412 struct ib_mr *mr; 1413 u32 key; 1414 int access; 1415 }; 1416 1417 static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr) 1418 { 1419 return container_of(wr, struct ib_reg_wr, wr); 1420 } 1421 1422 struct ib_sig_handover_wr { 1423 struct ib_send_wr wr; 1424 struct ib_sig_attrs *sig_attrs; 1425 struct ib_mr *sig_mr; 1426 int access_flags; 1427 struct ib_sge *prot; 1428 }; 1429 1430 static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) 1431 { 1432 return container_of(wr, struct ib_sig_handover_wr, wr); 1433 } 1434 1435 struct ib_recv_wr { 1436 struct ib_recv_wr *next; 1437 union { 1438 u64 wr_id; 1439 struct ib_cqe *wr_cqe; 1440 }; 1441 struct ib_sge *sg_list; 1442 int num_sge; 1443 }; 1444 1445 enum ib_access_flags { 1446 IB_ACCESS_LOCAL_WRITE = 1, 1447 IB_ACCESS_REMOTE_WRITE = (1<<1), 1448 IB_ACCESS_REMOTE_READ = (1<<2), 1449 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 1450 IB_ACCESS_MW_BIND = (1<<4), 1451 IB_ZERO_BASED = (1<<5), 1452 IB_ACCESS_ON_DEMAND = (1<<6), 1453 IB_ACCESS_HUGETLB = (1<<7), 1454 }; 1455 1456 /* 1457 * XXX: these are apparently used for ->rereg_user_mr, no idea why they 1458 * are hidden here instead of a uapi header! 
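 *
 * (Editor's aside, separate from the rereg flags documented here: a hedged
 * sketch of how the write-request containers defined above are commonly
 * strung together.  The buffer, rkey and qp variables are hypothetical;
 * ib_post_send() is declared later in this file.)
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode		= IB_WR_RDMA_WRITE,
 *			.send_flags	= IB_SEND_SIGNALED,
 *			.sg_list	= &sge,
 *			.num_sge	= 1,
 *		},
 *		.remote_addr	= remote_addr,
 *		.rkey		= rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr.wr, &bad_wr);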
1459 */ 1460 enum ib_mr_rereg_flags { 1461 IB_MR_REREG_TRANS = 1, 1462 IB_MR_REREG_PD = (1<<1), 1463 IB_MR_REREG_ACCESS = (1<<2), 1464 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1465 }; 1466 1467 struct ib_fmr_attr { 1468 int max_pages; 1469 int max_maps; 1470 u8 page_shift; 1471 }; 1472 1473 struct ib_umem; 1474 1475 enum rdma_remove_reason { 1476 /* Userspace requested uobject deletion. Call could fail */ 1477 RDMA_REMOVE_DESTROY, 1478 /* Context deletion. This call should delete the actual object itself */ 1479 RDMA_REMOVE_CLOSE, 1480 /* Driver is being hot-unplugged. This call should delete the actual object itself */ 1481 RDMA_REMOVE_DRIVER_REMOVE, 1482 /* Context is being cleaned-up, but commit was just completed */ 1483 RDMA_REMOVE_DURING_CLEANUP, 1484 }; 1485 1486 struct ib_rdmacg_object { 1487 #ifdef CONFIG_CGROUP_RDMA 1488 struct rdma_cgroup *cg; /* owner rdma cgroup */ 1489 #endif 1490 }; 1491 1492 struct ib_ucontext { 1493 struct ib_device *device; 1494 struct ib_uverbs_file *ufile; 1495 int closing; 1496 1497 /* locking the uobjects_list */ 1498 struct mutex uobjects_lock; 1499 struct list_head uobjects; 1500 /* protects cleanup process from other actions */ 1501 struct rw_semaphore cleanup_rwsem; 1502 enum rdma_remove_reason cleanup_reason; 1503 1504 struct pid *tgid; 1505 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1506 struct rb_root_cached umem_tree; 1507 /* 1508 * Protects .umem_rbroot and tree, as well as odp_mrs_count and 1509 * mmu notifiers registration. 1510 */ 1511 struct rw_semaphore umem_rwsem; 1512 void (*invalidate_range)(struct ib_umem *umem, 1513 unsigned long start, unsigned long end); 1514 1515 struct mmu_notifier mn; 1516 atomic_t notifier_count; 1517 /* A list of umems that don't have private mmu notifier counters yet. 
*/ 1518 struct list_head no_private_counters; 1519 int odp_mrs_count; 1520 #endif 1521 1522 struct ib_rdmacg_object cg_obj; 1523 }; 1524 1525 struct ib_uobject { 1526 u64 user_handle; /* handle given to us by userspace */ 1527 struct ib_ucontext *context; /* associated user context */ 1528 void *object; /* containing object */ 1529 struct list_head list; /* link to context's list */ 1530 struct ib_rdmacg_object cg_obj; /* rdmacg object */ 1531 int id; /* index into kernel idr */ 1532 struct kref ref; 1533 atomic_t usecnt; /* protects exclusive access */ 1534 struct rcu_head rcu; /* kfree_rcu() overhead */ 1535 1536 const struct uverbs_obj_type *type; 1537 }; 1538 1539 struct ib_uobject_file { 1540 struct ib_uobject uobj; 1541 /* ufile contains the lock between context release and file close */ 1542 struct ib_uverbs_file *ufile; 1543 }; 1544 1545 struct ib_udata { 1546 const void __user *inbuf; 1547 void __user *outbuf; 1548 size_t inlen; 1549 size_t outlen; 1550 }; 1551 1552 struct ib_pd { 1553 u32 local_dma_lkey; 1554 u32 flags; 1555 struct ib_device *device; 1556 struct ib_uobject *uobject; 1557 atomic_t usecnt; /* count all resources */ 1558 1559 u32 unsafe_global_rkey; 1560 1561 /* 1562 * Implementation details of the RDMA core, don't use in drivers: 1563 */ 1564 struct ib_mr *__internal_mr; 1565 struct rdma_restrack_entry res; 1566 }; 1567 1568 struct ib_xrcd { 1569 struct ib_device *device; 1570 atomic_t usecnt; /* count all exposed resources */ 1571 struct inode *inode; 1572 1573 struct mutex tgt_qp_mutex; 1574 struct list_head tgt_qp_list; 1575 }; 1576 1577 struct ib_ah { 1578 struct ib_device *device; 1579 struct ib_pd *pd; 1580 struct ib_uobject *uobject; 1581 enum rdma_ah_attr_type type; 1582 }; 1583 1584 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1585 1586 enum ib_poll_context { 1587 IB_POLL_DIRECT, /* caller context, no hw completions */ 1588 IB_POLL_SOFTIRQ, /* poll from softirq context */ 1589 IB_POLL_WORKQUEUE, /* poll from workqueue */ 1590 }; 1591 1592 struct ib_cq { 1593 struct ib_device *device; 1594 struct ib_uobject *uobject; 1595 ib_comp_handler comp_handler; 1596 void (*event_handler)(struct ib_event *, void *); 1597 void *cq_context; 1598 int cqe; 1599 atomic_t usecnt; /* count number of work queues */ 1600 enum ib_poll_context poll_ctx; 1601 struct ib_wc *wc; 1602 union { 1603 struct irq_poll iop; 1604 struct work_struct work; 1605 }; 1606 /* 1607 * Implementation details of the RDMA core, don't use in drivers: 1608 */ 1609 struct rdma_restrack_entry res; 1610 }; 1611 1612 struct ib_srq { 1613 struct ib_device *device; 1614 struct ib_pd *pd; 1615 struct ib_uobject *uobject; 1616 void (*event_handler)(struct ib_event *, void *); 1617 void *srq_context; 1618 enum ib_srq_type srq_type; 1619 atomic_t usecnt; 1620 1621 struct { 1622 struct ib_cq *cq; 1623 union { 1624 struct { 1625 struct ib_xrcd *xrcd; 1626 u32 srq_num; 1627 } xrc; 1628 }; 1629 } ext; 1630 }; 1631 1632 enum ib_raw_packet_caps { 1633 /* Strip cvlan from incoming packet and report it in the matching work 1634 * completion is supported. 1635 */ 1636 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0), 1637 /* Scatter FCS field of an incoming packet to host memory is supported. 1638 */ 1639 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1), 1640 /* Checksum offloads are supported (for both send and receive). */ 1641 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2), 1642 /* When a packet is received for an RQ with no receive WQEs, the 1643 * packet processing is delayed. 
1644 */ 1645 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3), 1646 }; 1647 1648 enum ib_wq_type { 1649 IB_WQT_RQ 1650 }; 1651 1652 enum ib_wq_state { 1653 IB_WQS_RESET, 1654 IB_WQS_RDY, 1655 IB_WQS_ERR 1656 }; 1657 1658 struct ib_wq { 1659 struct ib_device *device; 1660 struct ib_uobject *uobject; 1661 void *wq_context; 1662 void (*event_handler)(struct ib_event *, void *); 1663 struct ib_pd *pd; 1664 struct ib_cq *cq; 1665 u32 wq_num; 1666 enum ib_wq_state state; 1667 enum ib_wq_type wq_type; 1668 atomic_t usecnt; 1669 }; 1670 1671 enum ib_wq_flags { 1672 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, 1673 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, 1674 IB_WQ_FLAGS_DELAY_DROP = 1 << 2, 1675 IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3, 1676 }; 1677 1678 struct ib_wq_init_attr { 1679 void *wq_context; 1680 enum ib_wq_type wq_type; 1681 u32 max_wr; 1682 u32 max_sge; 1683 struct ib_cq *cq; 1684 void (*event_handler)(struct ib_event *, void *); 1685 u32 create_flags; /* Use enum ib_wq_flags */ 1686 }; 1687 1688 enum ib_wq_attr_mask { 1689 IB_WQ_STATE = 1 << 0, 1690 IB_WQ_CUR_STATE = 1 << 1, 1691 IB_WQ_FLAGS = 1 << 2, 1692 }; 1693 1694 struct ib_wq_attr { 1695 enum ib_wq_state wq_state; 1696 enum ib_wq_state curr_wq_state; 1697 u32 flags; /* Use enum ib_wq_flags */ 1698 u32 flags_mask; /* Use enum ib_wq_flags */ 1699 }; 1700 1701 struct ib_rwq_ind_table { 1702 struct ib_device *device; 1703 struct ib_uobject *uobject; 1704 atomic_t usecnt; 1705 u32 ind_tbl_num; 1706 u32 log_ind_tbl_size; 1707 struct ib_wq **ind_tbl; 1708 }; 1709 1710 struct ib_rwq_ind_table_init_attr { 1711 u32 log_ind_tbl_size; 1712 /* Each entry is a pointer to Receive Work Queue */ 1713 struct ib_wq **ind_tbl; 1714 }; 1715 1716 enum port_pkey_state { 1717 IB_PORT_PKEY_NOT_VALID = 0, 1718 IB_PORT_PKEY_VALID = 1, 1719 IB_PORT_PKEY_LISTED = 2, 1720 }; 1721 1722 struct ib_qp_security; 1723 1724 struct ib_port_pkey { 1725 enum port_pkey_state state; 1726 u16 pkey_index; 1727 u8 port_num; 1728 struct list_head qp_list; 1729 struct list_head to_error_list; 1730 struct ib_qp_security *sec; 1731 }; 1732 1733 struct ib_ports_pkeys { 1734 struct ib_port_pkey main; 1735 struct ib_port_pkey alt; 1736 }; 1737 1738 struct ib_qp_security { 1739 struct ib_qp *qp; 1740 struct ib_device *dev; 1741 /* Hold this mutex when changing port and pkey settings. */ 1742 struct mutex mutex; 1743 struct ib_ports_pkeys *ports_pkeys; 1744 /* A list of all open shared QP handles. Required to enforce security 1745 * properly for all users of a shared QP. 1746 */ 1747 struct list_head shared_qp_list; 1748 void *security; 1749 bool destroying; 1750 atomic_t error_list_count; 1751 struct completion error_complete; 1752 int error_comps_pending; 1753 }; 1754 1755 /* 1756 * @max_write_sge: Maximum SGE elements per RDMA WRITE request. 1757 * @max_read_sge: Maximum SGE elements per RDMA READ request. 
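 *
 * Editor's illustrative note: a ULP building a work request should bound its
 * scatter list by these limits, e.g.
 *
 *	if (num_sge > qp->max_write_sge)
 *		return -EINVAL;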
1758 */ 1759 struct ib_qp { 1760 struct ib_device *device; 1761 struct ib_pd *pd; 1762 struct ib_cq *send_cq; 1763 struct ib_cq *recv_cq; 1764 spinlock_t mr_lock; 1765 int mrs_used; 1766 struct list_head rdma_mrs; 1767 struct list_head sig_mrs; 1768 struct ib_srq *srq; 1769 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1770 struct list_head xrcd_list; 1771 1772 /* count times opened, mcast attaches, flow attaches */ 1773 atomic_t usecnt; 1774 struct list_head open_list; 1775 struct ib_qp *real_qp; 1776 struct ib_uobject *uobject; 1777 void (*event_handler)(struct ib_event *, void *); 1778 void *qp_context; 1779 u32 qp_num; 1780 u32 max_write_sge; 1781 u32 max_read_sge; 1782 enum ib_qp_type qp_type; 1783 struct ib_rwq_ind_table *rwq_ind_tbl; 1784 struct ib_qp_security *qp_sec; 1785 u8 port; 1786 1787 /* 1788 * Implementation details of the RDMA core, don't use in drivers: 1789 */ 1790 struct rdma_restrack_entry res; 1791 }; 1792 1793 struct ib_dm { 1794 struct ib_device *device; 1795 u32 length; 1796 u32 flags; 1797 struct ib_uobject *uobject; 1798 atomic_t usecnt; 1799 }; 1800 1801 struct ib_mr { 1802 struct ib_device *device; 1803 struct ib_pd *pd; 1804 u32 lkey; 1805 u32 rkey; 1806 u64 iova; 1807 u64 length; 1808 unsigned int page_size; 1809 bool need_inval; 1810 union { 1811 struct ib_uobject *uobject; /* user */ 1812 struct list_head qp_entry; /* FR */ 1813 }; 1814 1815 struct ib_dm *dm; 1816 1817 /* 1818 * Implementation details of the RDMA core, don't use in drivers: 1819 */ 1820 struct rdma_restrack_entry res; 1821 }; 1822 1823 struct ib_mw { 1824 struct ib_device *device; 1825 struct ib_pd *pd; 1826 struct ib_uobject *uobject; 1827 u32 rkey; 1828 enum ib_mw_type type; 1829 }; 1830 1831 struct ib_fmr { 1832 struct ib_device *device; 1833 struct ib_pd *pd; 1834 struct list_head list; 1835 u32 lkey; 1836 u32 rkey; 1837 }; 1838 1839 /* Supported steering options */ 1840 enum ib_flow_attr_type { 1841 /* steering according to rule specifications */ 1842 IB_FLOW_ATTR_NORMAL = 0x0, 1843 /* default unicast and multicast rule - 1844 * receive all Eth traffic which isn't steered to any QP 1845 */ 1846 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1847 /* default multicast rule - 1848 * receive all Eth multicast traffic which isn't steered to any QP 1849 */ 1850 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1851 /* sniffer rule - receive all port traffic */ 1852 IB_FLOW_ATTR_SNIFFER = 0x3 1853 }; 1854 1855 /* Supported steering header types */ 1856 enum ib_flow_spec_type { 1857 /* L2 headers*/ 1858 IB_FLOW_SPEC_ETH = 0x20, 1859 IB_FLOW_SPEC_IB = 0x22, 1860 /* L3 header*/ 1861 IB_FLOW_SPEC_IPV4 = 0x30, 1862 IB_FLOW_SPEC_IPV6 = 0x31, 1863 IB_FLOW_SPEC_ESP = 0x34, 1864 /* L4 headers*/ 1865 IB_FLOW_SPEC_TCP = 0x40, 1866 IB_FLOW_SPEC_UDP = 0x41, 1867 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, 1868 IB_FLOW_SPEC_GRE = 0x51, 1869 IB_FLOW_SPEC_MPLS = 0x60, 1870 IB_FLOW_SPEC_INNER = 0x100, 1871 /* Actions */ 1872 IB_FLOW_SPEC_ACTION_TAG = 0x1000, 1873 IB_FLOW_SPEC_ACTION_DROP = 0x1001, 1874 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002, 1875 IB_FLOW_SPEC_ACTION_COUNT = 0x1003, 1876 }; 1877 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1878 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10 1879 1880 /* Flow steering rule priority is set according to it's domain. 1881 * Lower domain value means higher priority. 
1882 */ 1883 enum ib_flow_domain { 1884 IB_FLOW_DOMAIN_USER, 1885 IB_FLOW_DOMAIN_ETHTOOL, 1886 IB_FLOW_DOMAIN_RFS, 1887 IB_FLOW_DOMAIN_NIC, 1888 IB_FLOW_DOMAIN_NUM /* Must be last */ 1889 }; 1890 1891 enum ib_flow_flags { 1892 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ 1893 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */ 1894 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */ 1895 }; 1896 1897 struct ib_flow_eth_filter { 1898 u8 dst_mac[6]; 1899 u8 src_mac[6]; 1900 __be16 ether_type; 1901 __be16 vlan_tag; 1902 /* Must be last */ 1903 u8 real_sz[0]; 1904 }; 1905 1906 struct ib_flow_spec_eth { 1907 u32 type; 1908 u16 size; 1909 struct ib_flow_eth_filter val; 1910 struct ib_flow_eth_filter mask; 1911 }; 1912 1913 struct ib_flow_ib_filter { 1914 __be16 dlid; 1915 __u8 sl; 1916 /* Must be last */ 1917 u8 real_sz[0]; 1918 }; 1919 1920 struct ib_flow_spec_ib { 1921 u32 type; 1922 u16 size; 1923 struct ib_flow_ib_filter val; 1924 struct ib_flow_ib_filter mask; 1925 }; 1926 1927 /* IPv4 header flags */ 1928 enum ib_ipv4_flags { 1929 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */ 1930 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the 1931 last have this flag set */ 1932 }; 1933 1934 struct ib_flow_ipv4_filter { 1935 __be32 src_ip; 1936 __be32 dst_ip; 1937 u8 proto; 1938 u8 tos; 1939 u8 ttl; 1940 u8 flags; 1941 /* Must be last */ 1942 u8 real_sz[0]; 1943 }; 1944 1945 struct ib_flow_spec_ipv4 { 1946 u32 type; 1947 u16 size; 1948 struct ib_flow_ipv4_filter val; 1949 struct ib_flow_ipv4_filter mask; 1950 }; 1951 1952 struct ib_flow_ipv6_filter { 1953 u8 src_ip[16]; 1954 u8 dst_ip[16]; 1955 __be32 flow_label; 1956 u8 next_hdr; 1957 u8 traffic_class; 1958 u8 hop_limit; 1959 /* Must be last */ 1960 u8 real_sz[0]; 1961 }; 1962 1963 struct ib_flow_spec_ipv6 { 1964 u32 type; 1965 u16 size; 1966 struct ib_flow_ipv6_filter val; 1967 struct ib_flow_ipv6_filter mask; 1968 }; 1969 1970 struct ib_flow_tcp_udp_filter { 1971 __be16 dst_port; 1972 __be16 src_port; 1973 /* Must be last */ 1974 u8 real_sz[0]; 1975 }; 1976 1977 struct ib_flow_spec_tcp_udp { 1978 u32 type; 1979 u16 size; 1980 struct ib_flow_tcp_udp_filter val; 1981 struct ib_flow_tcp_udp_filter mask; 1982 }; 1983 1984 struct ib_flow_tunnel_filter { 1985 __be32 tunnel_id; 1986 u8 real_sz[0]; 1987 }; 1988 1989 /* ib_flow_spec_tunnel describes the Vxlan tunnel 1990 * the tunnel_id from val has the vni value 1991 */ 1992 struct ib_flow_spec_tunnel { 1993 u32 type; 1994 u16 size; 1995 struct ib_flow_tunnel_filter val; 1996 struct ib_flow_tunnel_filter mask; 1997 }; 1998 1999 struct ib_flow_esp_filter { 2000 __be32 spi; 2001 __be32 seq; 2002 /* Must be last */ 2003 u8 real_sz[0]; 2004 }; 2005 2006 struct ib_flow_spec_esp { 2007 u32 type; 2008 u16 size; 2009 struct ib_flow_esp_filter val; 2010 struct ib_flow_esp_filter mask; 2011 }; 2012 2013 struct ib_flow_gre_filter { 2014 __be16 c_ks_res0_ver; 2015 __be16 protocol; 2016 __be32 key; 2017 /* Must be last */ 2018 u8 real_sz[0]; 2019 }; 2020 2021 struct ib_flow_spec_gre { 2022 u32 type; 2023 u16 size; 2024 struct ib_flow_gre_filter val; 2025 struct ib_flow_gre_filter mask; 2026 }; 2027 2028 struct ib_flow_mpls_filter { 2029 __be32 tag; 2030 /* Must be last */ 2031 u8 real_sz[0]; 2032 }; 2033 2034 struct ib_flow_spec_mpls { 2035 u32 type; 2036 u16 size; 2037 struct ib_flow_mpls_filter val; 2038 struct ib_flow_mpls_filter mask; 2039 }; 2040 2041 struct ib_flow_spec_action_tag { 2042 enum ib_flow_spec_type type; 2043 u16 size; 2044 u32 
tag_id; 2045 }; 2046 2047 struct ib_flow_spec_action_drop { 2048 enum ib_flow_spec_type type; 2049 u16 size; 2050 }; 2051 2052 struct ib_flow_spec_action_handle { 2053 enum ib_flow_spec_type type; 2054 u16 size; 2055 struct ib_flow_action *act; 2056 }; 2057 2058 enum ib_counters_description { 2059 IB_COUNTER_PACKETS, 2060 IB_COUNTER_BYTES, 2061 }; 2062 2063 struct ib_flow_spec_action_count { 2064 enum ib_flow_spec_type type; 2065 u16 size; 2066 struct ib_counters *counters; 2067 }; 2068 2069 union ib_flow_spec { 2070 struct { 2071 u32 type; 2072 u16 size; 2073 }; 2074 struct ib_flow_spec_eth eth; 2075 struct ib_flow_spec_ib ib; 2076 struct ib_flow_spec_ipv4 ipv4; 2077 struct ib_flow_spec_tcp_udp tcp_udp; 2078 struct ib_flow_spec_ipv6 ipv6; 2079 struct ib_flow_spec_tunnel tunnel; 2080 struct ib_flow_spec_esp esp; 2081 struct ib_flow_spec_gre gre; 2082 struct ib_flow_spec_mpls mpls; 2083 struct ib_flow_spec_action_tag flow_tag; 2084 struct ib_flow_spec_action_drop drop; 2085 struct ib_flow_spec_action_handle action; 2086 struct ib_flow_spec_action_count flow_count; 2087 }; 2088 2089 struct ib_flow_attr { 2090 enum ib_flow_attr_type type; 2091 u16 size; 2092 u16 priority; 2093 u32 flags; 2094 u8 num_of_specs; 2095 u8 port; 2096 union ib_flow_spec flows[]; 2097 }; 2098 2099 struct ib_flow { 2100 struct ib_qp *qp; 2101 struct ib_uobject *uobject; 2102 }; 2103 2104 enum ib_flow_action_type { 2105 IB_FLOW_ACTION_UNSPECIFIED, 2106 IB_FLOW_ACTION_ESP = 1, 2107 }; 2108 2109 struct ib_flow_action_attrs_esp_keymats { 2110 enum ib_uverbs_flow_action_esp_keymat protocol; 2111 union { 2112 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; 2113 } keymat; 2114 }; 2115 2116 struct ib_flow_action_attrs_esp_replays { 2117 enum ib_uverbs_flow_action_esp_replay protocol; 2118 union { 2119 struct ib_uverbs_flow_action_esp_replay_bmp bmp; 2120 } replay; 2121 }; 2122 2123 enum ib_flow_action_attrs_esp_flags { 2124 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags 2125 * This is done in order to share the same flags between user-space and 2126 * kernel and spare an unnecessary translation. 2127 */ 2128 2129 /* Kernel flags */ 2130 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, 2131 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, 2132 }; 2133 2134 struct ib_flow_spec_list { 2135 struct ib_flow_spec_list *next; 2136 union ib_flow_spec spec; 2137 }; 2138 2139 struct ib_flow_action_attrs_esp { 2140 struct ib_flow_action_attrs_esp_keymats *keymat; 2141 struct ib_flow_action_attrs_esp_replays *replay; 2142 struct ib_flow_spec_list *encap; 2143 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. 2144 * Value of 0 is a valid value. 
2145 */ 2146 u32 esn; 2147 u32 spi; 2148 u32 seq; 2149 u32 tfc_pad; 2150 /* Use enum ib_flow_action_attrs_esp_flags */ 2151 u64 flags; 2152 u64 hard_limit_pkts; 2153 }; 2154 2155 struct ib_flow_action { 2156 struct ib_device *device; 2157 struct ib_uobject *uobject; 2158 enum ib_flow_action_type type; 2159 atomic_t usecnt; 2160 }; 2161 2162 struct ib_mad_hdr; 2163 struct ib_grh; 2164 2165 enum ib_process_mad_flags { 2166 IB_MAD_IGNORE_MKEY = 1, 2167 IB_MAD_IGNORE_BKEY = 2, 2168 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 2169 }; 2170 2171 enum ib_mad_result { 2172 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 2173 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 2174 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 2175 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 2176 }; 2177 2178 struct ib_port_cache { 2179 u64 subnet_prefix; 2180 struct ib_pkey_cache *pkey; 2181 struct ib_gid_table *gid; 2182 u8 lmc; 2183 enum ib_port_state port_state; 2184 }; 2185 2186 struct ib_cache { 2187 rwlock_t lock; 2188 struct ib_event_handler event_handler; 2189 struct ib_port_cache *ports; 2190 }; 2191 2192 struct iw_cm_verbs; 2193 2194 struct ib_port_immutable { 2195 int pkey_tbl_len; 2196 int gid_tbl_len; 2197 u32 core_cap_flags; 2198 u32 max_mad_size; 2199 }; 2200 2201 /* rdma netdev type - specifies protocol type */ 2202 enum rdma_netdev_t { 2203 RDMA_NETDEV_OPA_VNIC, 2204 RDMA_NETDEV_IPOIB, 2205 }; 2206 2207 /** 2208 * struct rdma_netdev - rdma netdev 2209 * For cases where netstack interfacing is required. 2210 */ 2211 struct rdma_netdev { 2212 void *clnt_priv; 2213 struct ib_device *hca; 2214 u8 port_num; 2215 2216 /* cleanup function must be specified */ 2217 void (*free_rdma_netdev)(struct net_device *netdev); 2218 2219 /* control functions */ 2220 void (*set_id)(struct net_device *netdev, int id); 2221 /* send packet */ 2222 int (*send)(struct net_device *dev, struct sk_buff *skb, 2223 struct ib_ah *address, u32 dqpn); 2224 /* multicast */ 2225 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca, 2226 union ib_gid *gid, u16 mlid, 2227 int set_qkey, u32 qkey); 2228 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca, 2229 union ib_gid *gid, u16 mlid); 2230 }; 2231 2232 struct ib_port_pkey_list { 2233 /* Lock to hold while modifying the list. */ 2234 spinlock_t list_lock; 2235 struct list_head pkey_list; 2236 }; 2237 2238 struct ib_counters { 2239 struct ib_device *device; 2240 struct ib_uobject *uobject; 2241 /* num of objects attached */ 2242 atomic_t usecnt; 2243 }; 2244 2245 enum ib_read_counters_flags { 2246 /* prefer read values from driver cache */ 2247 IB_READ_COUNTERS_ATTR_PREFER_CACHED = 1 << 0, 2248 }; 2249 2250 struct ib_counters_read_attr { 2251 u64 *counters_buff; 2252 u32 ncounters; 2253 u32 flags; /* use enum ib_read_counters_flags */ 2254 }; 2255 2256 struct uverbs_attr_bundle; 2257 2258 struct ib_device { 2259 /* Do not access @dma_device directly from ULP nor from HW drivers. 
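 *
 * (Editor's hedged sketch: instead of touching dma_device, consumers are
 * expected to go through the ib_dma_*() wrappers declared later in this
 * file; buf and size below are hypothetical.)
 *
 *	u64 addr = ib_dma_map_single(device, buf, size, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(device, addr))
 *		return -ENOMEM;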
*/ 2260 struct device *dma_device; 2261 2262 char name[IB_DEVICE_NAME_MAX]; 2263 2264 struct list_head event_handler_list; 2265 spinlock_t event_handler_lock; 2266 2267 spinlock_t client_data_lock; 2268 struct list_head core_list; 2269 /* Access to the client_data_list is protected by the client_data_lock 2270 * spinlock and the lists_rwsem read-write semaphore */ 2271 struct list_head client_data_list; 2272 2273 struct ib_cache cache; 2274 /** 2275 * port_immutable is indexed by port number 2276 */ 2277 struct ib_port_immutable *port_immutable; 2278 2279 int num_comp_vectors; 2280 2281 struct ib_port_pkey_list *port_pkey_list; 2282 2283 struct iw_cm_verbs *iwcm; 2284 2285 /** 2286 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 2287 * driver initialized data. The struct is kfree()'ed by the sysfs 2288 * core when the device is removed. A lifespan of -1 in the return 2289 * struct tells the core to set a default lifespan. 2290 */ 2291 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 2292 u8 port_num); 2293 /** 2294 * get_hw_stats - Fill in the counter value(s) in the stats struct. 2295 * @index - The index in the value array we wish to have updated, or 2296 * num_counters if we want all stats updated 2297 * Return codes - 2298 * < 0 - Error, no counters updated 2299 * index - Updated the single counter pointed to by index 2300 * num_counters - Updated all counters (will reset the timestamp 2301 * and prevent further calls for lifespan milliseconds) 2302 * Drivers are allowed to update all counters in lieu of just the 2303 * one given in index at their option 2304 */ 2305 int (*get_hw_stats)(struct ib_device *device, 2306 struct rdma_hw_stats *stats, 2307 u8 port, int index); 2308 int (*query_device)(struct ib_device *device, 2309 struct ib_device_attr *device_attr, 2310 struct ib_udata *udata); 2311 int (*query_port)(struct ib_device *device, 2312 u8 port_num, 2313 struct ib_port_attr *port_attr); 2314 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 2315 u8 port_num); 2316 /* When calling get_netdev, the HW vendor's driver should return the 2317 * net device of device @device at port @port_num or NULL if such 2318 * a net device doesn't exist. The vendor driver should call dev_hold 2319 * on this net device. The HW vendor's device driver must guarantee 2320 * that this function returns NULL before the net device has finished 2321 * NETDEV_UNREGISTER state. 2322 */ 2323 struct net_device *(*get_netdev)(struct ib_device *device, 2324 u8 port_num); 2325 /* query_gid should return the GID value for @device, when the @port_num 2326 * link layer is either IB or iWARP. It is a no-op if the @port_num port 2327 * uses the RoCE link layer. 2328 */ 2329 int (*query_gid)(struct ib_device *device, 2330 u8 port_num, int index, 2331 union ib_gid *gid); 2332 /* When calling add_gid, the HW vendor's driver should add the gid 2333 * of the device's port at the gid index available in @attr. Meta-info of 2334 * that gid (for example, the network device related to this gid) is 2335 * available at @attr. @context allows the HW vendor driver to store 2336 * extra information together with a GID entry. The HW vendor driver may 2337 * allocate memory to contain this information and store it in @context 2338 * when a new GID entry is written to. Params are consistent until the 2339 * next call of add_gid or delete_gid. The function should return 0 on 2340 * success or error otherwise. The function could be called 2341 * concurrently for different ports.
This function is only called when 2342 * roce_gid_table is used. 2343 */ 2344 int (*add_gid)(const union ib_gid *gid, 2345 const struct ib_gid_attr *attr, 2346 void **context); 2347 /* When calling del_gid, the HW vendor's driver should delete the 2348 * gid of device @device at gid index gid_index of port port_num 2349 * available in @attr. 2350 * Upon the deletion of a GID entry, the HW vendor must free any 2351 * allocated memory. The caller will clear @context afterwards. 2352 * This function is only called when roce_gid_table is used. 2353 */ 2354 int (*del_gid)(const struct ib_gid_attr *attr, 2355 void **context); 2356 int (*query_pkey)(struct ib_device *device, 2357 u8 port_num, u16 index, u16 *pkey); 2358 int (*modify_device)(struct ib_device *device, 2359 int device_modify_mask, 2360 struct ib_device_modify *device_modify); 2361 int (*modify_port)(struct ib_device *device, 2362 u8 port_num, int port_modify_mask, 2363 struct ib_port_modify *port_modify); 2364 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 2365 struct ib_udata *udata); 2366 int (*dealloc_ucontext)(struct ib_ucontext *context); 2367 int (*mmap)(struct ib_ucontext *context, 2368 struct vm_area_struct *vma); 2369 struct ib_pd * (*alloc_pd)(struct ib_device *device, 2370 struct ib_ucontext *context, 2371 struct ib_udata *udata); 2372 int (*dealloc_pd)(struct ib_pd *pd); 2373 struct ib_ah * (*create_ah)(struct ib_pd *pd, 2374 struct rdma_ah_attr *ah_attr, 2375 struct ib_udata *udata); 2376 int (*modify_ah)(struct ib_ah *ah, 2377 struct rdma_ah_attr *ah_attr); 2378 int (*query_ah)(struct ib_ah *ah, 2379 struct rdma_ah_attr *ah_attr); 2380 int (*destroy_ah)(struct ib_ah *ah); 2381 struct ib_srq * (*create_srq)(struct ib_pd *pd, 2382 struct ib_srq_init_attr *srq_init_attr, 2383 struct ib_udata *udata); 2384 int (*modify_srq)(struct ib_srq *srq, 2385 struct ib_srq_attr *srq_attr, 2386 enum ib_srq_attr_mask srq_attr_mask, 2387 struct ib_udata *udata); 2388 int (*query_srq)(struct ib_srq *srq, 2389 struct ib_srq_attr *srq_attr); 2390 int (*destroy_srq)(struct ib_srq *srq); 2391 int (*post_srq_recv)(struct ib_srq *srq, 2392 struct ib_recv_wr *recv_wr, 2393 struct ib_recv_wr **bad_recv_wr); 2394 struct ib_qp * (*create_qp)(struct ib_pd *pd, 2395 struct ib_qp_init_attr *qp_init_attr, 2396 struct ib_udata *udata); 2397 int (*modify_qp)(struct ib_qp *qp, 2398 struct ib_qp_attr *qp_attr, 2399 int qp_attr_mask, 2400 struct ib_udata *udata); 2401 int (*query_qp)(struct ib_qp *qp, 2402 struct ib_qp_attr *qp_attr, 2403 int qp_attr_mask, 2404 struct ib_qp_init_attr *qp_init_attr); 2405 int (*destroy_qp)(struct ib_qp *qp); 2406 int (*post_send)(struct ib_qp *qp, 2407 struct ib_send_wr *send_wr, 2408 struct ib_send_wr **bad_send_wr); 2409 int (*post_recv)(struct ib_qp *qp, 2410 struct ib_recv_wr *recv_wr, 2411 struct ib_recv_wr **bad_recv_wr); 2412 struct ib_cq * (*create_cq)(struct ib_device *device, 2413 const struct ib_cq_init_attr *attr, 2414 struct ib_ucontext *context, 2415 struct ib_udata *udata); 2416 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 2417 u16 cq_period); 2418 int (*destroy_cq)(struct ib_cq *cq); 2419 int (*resize_cq)(struct ib_cq *cq, int cqe, 2420 struct ib_udata *udata); 2421 int (*poll_cq)(struct ib_cq *cq, int num_entries, 2422 struct ib_wc *wc); 2423 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 2424 int (*req_notify_cq)(struct ib_cq *cq, 2425 enum ib_cq_notify_flags flags); 2426 int (*req_ncomp_notif)(struct ib_cq *cq, 2427 int wc_cnt); 2428 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 2429 
int mr_access_flags); 2430 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 2431 u64 start, u64 length, 2432 u64 virt_addr, 2433 int mr_access_flags, 2434 struct ib_udata *udata); 2435 int (*rereg_user_mr)(struct ib_mr *mr, 2436 int flags, 2437 u64 start, u64 length, 2438 u64 virt_addr, 2439 int mr_access_flags, 2440 struct ib_pd *pd, 2441 struct ib_udata *udata); 2442 int (*dereg_mr)(struct ib_mr *mr); 2443 struct ib_mr * (*alloc_mr)(struct ib_pd *pd, 2444 enum ib_mr_type mr_type, 2445 u32 max_num_sg); 2446 int (*map_mr_sg)(struct ib_mr *mr, 2447 struct scatterlist *sg, 2448 int sg_nents, 2449 unsigned int *sg_offset); 2450 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 2451 enum ib_mw_type type, 2452 struct ib_udata *udata); 2453 int (*dealloc_mw)(struct ib_mw *mw); 2454 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 2455 int mr_access_flags, 2456 struct ib_fmr_attr *fmr_attr); 2457 int (*map_phys_fmr)(struct ib_fmr *fmr, 2458 u64 *page_list, int list_len, 2459 u64 iova); 2460 int (*unmap_fmr)(struct list_head *fmr_list); 2461 int (*dealloc_fmr)(struct ib_fmr *fmr); 2462 int (*attach_mcast)(struct ib_qp *qp, 2463 union ib_gid *gid, 2464 u16 lid); 2465 int (*detach_mcast)(struct ib_qp *qp, 2466 union ib_gid *gid, 2467 u16 lid); 2468 int (*process_mad)(struct ib_device *device, 2469 int process_mad_flags, 2470 u8 port_num, 2471 const struct ib_wc *in_wc, 2472 const struct ib_grh *in_grh, 2473 const struct ib_mad_hdr *in_mad, 2474 size_t in_mad_size, 2475 struct ib_mad_hdr *out_mad, 2476 size_t *out_mad_size, 2477 u16 *out_mad_pkey_index); 2478 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 2479 struct ib_ucontext *ucontext, 2480 struct ib_udata *udata); 2481 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 2482 struct ib_flow * (*create_flow)(struct ib_qp *qp, 2483 struct ib_flow_attr 2484 *flow_attr, 2485 int domain, 2486 struct ib_udata *udata); 2487 int (*destroy_flow)(struct ib_flow *flow_id); 2488 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2489 struct ib_mr_status *mr_status); 2490 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2491 void (*drain_rq)(struct ib_qp *qp); 2492 void (*drain_sq)(struct ib_qp *qp); 2493 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2494 int state); 2495 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2496 struct ifla_vf_info *ivf); 2497 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2498 struct ifla_vf_stats *stats); 2499 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2500 int type); 2501 struct ib_wq * (*create_wq)(struct ib_pd *pd, 2502 struct ib_wq_init_attr *init_attr, 2503 struct ib_udata *udata); 2504 int (*destroy_wq)(struct ib_wq *wq); 2505 int (*modify_wq)(struct ib_wq *wq, 2506 struct ib_wq_attr *attr, 2507 u32 wq_attr_mask, 2508 struct ib_udata *udata); 2509 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, 2510 struct ib_rwq_ind_table_init_attr *init_attr, 2511 struct ib_udata *udata); 2512 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2513 struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device, 2514 const struct ib_flow_action_attrs_esp *attr, 2515 struct uverbs_attr_bundle *attrs); 2516 int (*destroy_flow_action)(struct ib_flow_action *action); 2517 int (*modify_flow_action_esp)(struct ib_flow_action *action, 2518 const struct ib_flow_action_attrs_esp *attr, 2519 struct uverbs_attr_bundle *attrs); 2520 struct ib_dm * (*alloc_dm)(struct ib_device *device, 2521 struct 
ib_ucontext *context, 2522 struct ib_dm_alloc_attr *attr, 2523 struct uverbs_attr_bundle *attrs); 2524 int (*dealloc_dm)(struct ib_dm *dm); 2525 struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, 2526 struct ib_dm_mr_attr *attr, 2527 struct uverbs_attr_bundle *attrs); 2528 struct ib_counters * (*create_counters)(struct ib_device *device, 2529 struct uverbs_attr_bundle *attrs); 2530 int (*destroy_counters)(struct ib_counters *counters); 2531 int (*read_counters)(struct ib_counters *counters, 2532 struct ib_counters_read_attr *counters_read_attr, 2533 struct uverbs_attr_bundle *attrs); 2534 2535 /** 2536 * rdma netdev operation 2537 * 2538 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it 2539 * doesn't support the specified rdma netdev type. 2540 */ 2541 struct net_device *(*alloc_rdma_netdev)( 2542 struct ib_device *device, 2543 u8 port_num, 2544 enum rdma_netdev_t type, 2545 const char *name, 2546 unsigned char name_assign_type, 2547 void (*setup)(struct net_device *)); 2548 2549 struct module *owner; 2550 struct device dev; 2551 struct kobject *ports_parent; 2552 struct list_head port_list; 2553 2554 enum { 2555 IB_DEV_UNINITIALIZED, 2556 IB_DEV_REGISTERED, 2557 IB_DEV_UNREGISTERED 2558 } reg_state; 2559 2560 int uverbs_abi_ver; 2561 u64 uverbs_cmd_mask; 2562 u64 uverbs_ex_cmd_mask; 2563 2564 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2565 __be64 node_guid; 2566 u32 local_dma_lkey; 2567 u16 is_switch:1; 2568 u8 node_type; 2569 u8 phys_port_cnt; 2570 struct ib_device_attr attrs; 2571 struct attribute_group *hw_stats_ag; 2572 struct rdma_hw_stats *hw_stats; 2573 2574 #ifdef CONFIG_CGROUP_RDMA 2575 struct rdmacg_device cg_device; 2576 #endif 2577 2578 u32 index; 2579 /* 2580 * Implementation details of the RDMA core, don't use in drivers 2581 */ 2582 struct rdma_restrack_root res; 2583 2584 /** 2585 * The following mandatory functions are used only at device 2586 * registration. Keep functions such as these at the end of this 2587 * structure to avoid cache line misses when accessing struct ib_device 2588 * in fast paths. 2589 */ 2590 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 2591 void (*get_dev_fw_str)(struct ib_device *, char *str); 2592 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, 2593 int comp_vector); 2594 2595 struct uverbs_root_spec *specs_root; 2596 enum rdma_driver_id driver_id; 2597 }; 2598 2599 struct ib_client { 2600 char *name; 2601 void (*add) (struct ib_device *); 2602 void (*remove)(struct ib_device *, void *client_data); 2603 2604 /* Returns the net_dev belonging to this ib_client and matching the 2605 * given parameters. 2606 * @dev: An RDMA device that the net_dev use for communication. 2607 * @port: A physical port number on the RDMA device. 2608 * @pkey: P_Key that the net_dev uses if applicable. 2609 * @gid: A GID that the net_dev uses to communicate. 2610 * @addr: An IP address the net_dev is configured with. 2611 * @client_data: The device's client data set by ib_set_client_data(). 2612 * 2613 * An ib_client that implements a net_dev on top of RDMA devices 2614 * (such as IP over IB) should implement this callback, allowing the 2615 * rdma_cm module to find the right net_dev for a given request. 2616 * 2617 * The caller is responsible for calling dev_put on the returned 2618 * netdev. 
*/ 2619 struct net_device *(*get_net_dev_by_params)( 2620 struct ib_device *dev, 2621 u8 port, 2622 u16 pkey, 2623 const union ib_gid *gid, 2624 const struct sockaddr *addr, 2625 void *client_data); 2626 struct list_head list; 2627 }; 2628 2629 struct ib_device *ib_alloc_device(size_t size); 2630 void ib_dealloc_device(struct ib_device *device); 2631 2632 void ib_get_device_fw_str(struct ib_device *device, char *str); 2633 2634 int ib_register_device(struct ib_device *device, 2635 int (*port_callback)(struct ib_device *, 2636 u8, struct kobject *)); 2637 void ib_unregister_device(struct ib_device *device); 2638 2639 int ib_register_client (struct ib_client *client); 2640 void ib_unregister_client(struct ib_client *client); 2641 2642 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 2643 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2644 void *data); 2645 2646 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2647 { 2648 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2649 } 2650 2651 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2652 { 2653 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2654 } 2655 2656 static inline bool ib_is_buffer_cleared(const void __user *p, 2657 size_t len) 2658 { 2659 bool ret; 2660 u8 *buf; 2661 2662 if (len > USHRT_MAX) 2663 return false; 2664 2665 buf = memdup_user(p, len); 2666 if (IS_ERR(buf)) 2667 return false; 2668 2669 ret = !memchr_inv(buf, 0, len); 2670 kfree(buf); 2671 return ret; 2672 } 2673 2674 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2675 size_t offset, 2676 size_t len) 2677 { 2678 return ib_is_buffer_cleared(udata->inbuf + offset, len); 2679 } 2680 2681 /** 2682 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2683 * contains all required attributes and no attributes not allowed for 2684 * the given QP state transition. 2685 * @cur_state: Current QP state 2686 * @next_state: Next QP state 2687 * @type: QP type 2688 * @mask: Mask of supplied QP attributes 2689 * @ll : link layer of port 2690 * 2691 * This function is a helper function that a low-level driver's 2692 * modify_qp method can use to validate the consumer's input. It 2693 * checks that cur_state and next_state are valid QP states, that a 2694 * transition from cur_state to next_state is allowed by the IB spec, 2695 * and that the attribute mask supplied is allowed for the transition. 2696 */ 2697 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2698 enum ib_qp_type type, enum ib_qp_attr_mask mask, 2699 enum rdma_link_layer ll); 2700 2701 void ib_register_event_handler(struct ib_event_handler *event_handler); 2702 void ib_unregister_event_handler(struct ib_event_handler *event_handler); 2703 void ib_dispatch_event(struct ib_event *event); 2704 2705 int ib_query_port(struct ib_device *device, 2706 u8 port_num, struct ib_port_attr *port_attr); 2707 2708 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2709 u8 port_num); 2710 2711 /** 2712 * rdma_cap_ib_switch - Check if the device is IB switch 2713 * @device: Device to check 2714 * 2715 * Device driver is responsible for setting is_switch bit on 2716 * in ib_device structure at init time. 2717 * 2718 * Return: true if the device is IB switch. 
2719 */ 2720 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2721 { 2722 return device->is_switch; 2723 } 2724 2725 /** 2726 * rdma_start_port - Return the first valid port number for the device 2727 * specified 2728 * 2729 * @device: Device to be checked 2730 * 2731 * Return start port number 2732 */ 2733 static inline u8 rdma_start_port(const struct ib_device *device) 2734 { 2735 return rdma_cap_ib_switch(device) ? 0 : 1; 2736 } 2737 2738 /** 2739 * rdma_end_port - Return the last valid port number for the device 2740 * specified 2741 * 2742 * @device: Device to be checked 2743 * 2744 * Return last port number 2745 */ 2746 static inline u8 rdma_end_port(const struct ib_device *device) 2747 { 2748 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; 2749 } 2750 2751 static inline int rdma_is_port_valid(const struct ib_device *device, 2752 unsigned int port) 2753 { 2754 return (port >= rdma_start_port(device) && 2755 port <= rdma_end_port(device)); 2756 } 2757 2758 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 2759 { 2760 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; 2761 } 2762 2763 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2764 { 2765 return device->port_immutable[port_num].core_cap_flags & 2766 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 2767 } 2768 2769 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 2770 { 2771 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 2772 } 2773 2774 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 2775 { 2776 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 2777 } 2778 2779 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 2780 { 2781 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; 2782 } 2783 2784 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2785 { 2786 return rdma_protocol_ib(device, port_num) || 2787 rdma_protocol_roce(device, port_num); 2788 } 2789 2790 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) 2791 { 2792 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET; 2793 } 2794 2795 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) 2796 { 2797 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC; 2798 } 2799 2800 /** 2801 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 2802 * Management Datagrams. 2803 * @device: Device to check 2804 * @port_num: Port number to check 2805 * 2806 * Management Datagrams (MAD) are a required part of the InfiniBand 2807 * specification and are supported on all InfiniBand devices. A slightly 2808 * extended version are also supported on OPA interfaces. 2809 * 2810 * Return: true if the port supports sending/receiving of MAD packets. 2811 */ 2812 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 2813 { 2814 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; 2815 } 2816 2817 /** 2818 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 2819 * Management Datagrams. 
* @device: Device to check 2821 * @port_num: Port number to check 2822 * 2823 * Intel OmniPath devices extend and/or replace the InfiniBand Management 2824 * datagrams with their own versions. These OPA MADs share many but not all of 2825 * the characteristics of InfiniBand MADs. 2826 * 2827 * OPA MADs differ in the following ways: 2828 * 2829 * 1) MADs are variable size up to 2K 2830 * IBTA defined MADs remain fixed at 256 bytes 2831 * 2) OPA SMPs must carry valid PKeys 2832 * 3) OPA SMP packets are a different format 2833 * 2834 * Return: true if the port supports OPA MAD packet formats. 2835 */ 2836 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 2837 { 2838 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) 2839 == RDMA_CORE_CAP_OPA_MAD; 2840 } 2841 2842 /** 2843 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 2844 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 2845 * @device: Device to check 2846 * @port_num: Port number to check 2847 * 2848 * Each InfiniBand node is required to provide a Subnet Management Agent 2849 * that the subnet manager can access. Prior to the fabric being fully 2850 * configured by the subnet manager, the SMA is accessed via a well known 2851 * interface called the Subnet Management Interface (SMI). This interface 2852 * uses directed route packets to communicate with the SM to get around the 2853 * chicken and egg problem of the SM needing to know what's on the fabric 2854 * in order to configure the fabric, and needing to configure the fabric in 2855 * order to send packets to the devices on the fabric. These directed 2856 * route packets do not need the fabric fully configured in order to reach 2857 * their destination. The SMI is the only method allowed to send 2858 * directed route packets on an InfiniBand fabric. 2859 * 2860 * Return: true if the port provides an SMI. 2861 */ 2862 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 2863 { 2864 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; 2865 } 2866 2867 /** 2868 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 2869 * Communication Manager. 2870 * @device: Device to check 2871 * @port_num: Port number to check 2872 * 2873 * The InfiniBand Communication Manager is one of many pre-defined General 2874 * Service Agents (GSA) that are accessed via the General Service 2875 * Interface (GSI). Its role is to facilitate establishment of connections 2876 * between nodes as well as other management related tasks for established 2877 * connections. 2878 * 2879 * Return: true if the port supports an IB CM (this does not guarantee that 2880 * a CM is actually running however). 2881 */ 2882 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 2883 { 2884 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; 2885 } 2886 2887 /** 2888 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 2889 * Communication Manager. 2890 * @device: Device to check 2891 * @port_num: Port number to check 2892 * 2893 * Similar to above, but specific to iWARP connections which have a different 2894 * management protocol than InfiniBand. 2895 * 2896 * Return: true if the port supports an iWARP CM (this does not guarantee that 2897 * a CM is actually running however).
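 *
 * For example, a consumer that needs a connection manager could check the
 * port capability first and fall back to the IB CM (illustrative sketch only;
 * setup_iwarp_cm_id()/setup_ib_cm_id() are hypothetical consumer helpers, not
 * part of this header):
 *
 *	if (rdma_cap_iw_cm(device, port_num))
 *		setup_iwarp_cm_id(...);
 *	else if (rdma_cap_ib_cm(device, port_num))
 *		setup_ib_cm_id(...);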
2898 */ 2899 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 2900 { 2901 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; 2902 } 2903 2904 /** 2905 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 2906 * Subnet Administration. 2907 * @device: Device to check 2908 * @port_num: Port number to check 2909 * 2910 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 2911 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 2912 * fabrics, devices should resolve routes to other hosts by contacting the 2913 * SA to query the proper route. 2914 * 2915 * Return: true if the port should act as a client to the fabric Subnet 2916 * Administration interface. This does not imply that the SA service is 2917 * running locally. 2918 */ 2919 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 2920 { 2921 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; 2922 } 2923 2924 /** 2925 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 2926 * Multicast. 2927 * @device: Device to check 2928 * @port_num: Port number to check 2929 * 2930 * InfiniBand multicast registration is more complex than normal IPv4 or 2931 * IPv6 multicast registration. Each Host Channel Adapter must register 2932 * with the Subnet Manager when it wishes to join a multicast group. It 2933 * should do so only once regardless of how many queue pairs it subscribes 2934 * to this group. And it should leave the group only after all queue pairs 2935 * attached to the group have been detached. 2936 * 2937 * Return: true if the port must undertake the additional adminstrative 2938 * overhead of registering/unregistering with the SM and tracking of the 2939 * total number of queue pairs attached to the multicast group. 2940 */ 2941 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 2942 { 2943 return rdma_cap_ib_sa(device, port_num); 2944 } 2945 2946 /** 2947 * rdma_cap_af_ib - Check if the port of device has the capability 2948 * Native Infiniband Address. 2949 * @device: Device to check 2950 * @port_num: Port number to check 2951 * 2952 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 2953 * GID. RoCE uses a different mechanism, but still generates a GID via 2954 * a prescribed mechanism and port specific data. 2955 * 2956 * Return: true if the port uses a GID address to identify devices on the 2957 * network. 2958 */ 2959 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 2960 { 2961 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; 2962 } 2963 2964 /** 2965 * rdma_cap_eth_ah - Check if the port of device has the capability 2966 * Ethernet Address Handle. 2967 * @device: Device to check 2968 * @port_num: Port number to check 2969 * 2970 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 2971 * to fabricate GIDs over Ethernet/IP specific addresses native to the 2972 * port. Normally, packet headers are generated by the sending host 2973 * adapter, but when sending connectionless datagrams, we must manually 2974 * inject the proper headers for the fabric we are communicating over. 2975 * 2976 * Return: true if we are running as a RoCE port and must force the 2977 * addition of a Global Route Header built from our Ethernet Address 2978 * Handle into our header list for connectionless packets. 
2979 */ 2980 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2981 { 2982 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2983 } 2984 2985 /** 2986 * rdma_cap_opa_ah - Check if the port of device supports 2987 * OPA Address handles 2988 * @device: Device to check 2989 * @port_num: Port number to check 2990 * 2991 * Return: true if we are running on an OPA device which supports 2992 * the extended OPA addressing. 2993 */ 2994 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num) 2995 { 2996 return (device->port_immutable[port_num].core_cap_flags & 2997 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; 2998 } 2999 3000 /** 3001 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 3002 * 3003 * @device: Device 3004 * @port_num: Port number 3005 * 3006 * This MAD size includes the MAD headers and MAD payload. No other headers 3007 * are included. 3008 * 3009 * Return the max MAD size required by the Port. Will return 0 if the port 3010 * does not support MADs 3011 */ 3012 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 3013 { 3014 return device->port_immutable[port_num].max_mad_size; 3015 } 3016 3017 /** 3018 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 3019 * @device: Device to check 3020 * @port_num: Port number to check 3021 * 3022 * RoCE GID table mechanism manages the various GIDs for a device. 3023 * 3024 * NOTE: if allocating the port's GID table has failed, this call will still 3025 * return true, but any RoCE GID table API will fail. 3026 * 3027 * Return: true if the port uses RoCE GID table mechanism in order to manage 3028 * its GIDs. 3029 */ 3030 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 3031 u8 port_num) 3032 { 3033 return rdma_protocol_roce(device, port_num) && 3034 device->add_gid && device->del_gid; 3035 } 3036 3037 /* 3038 * Check if the device supports READ W/ INVALIDATE. 3039 */ 3040 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 3041 { 3042 /* 3043 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 3044 * has support for it yet. 3045 */ 3046 return rdma_protocol_iwarp(dev, port_num); 3047 } 3048 3049 int ib_query_gid(struct ib_device *device, 3050 u8 port_num, int index, union ib_gid *gid, 3051 struct ib_gid_attr *attr); 3052 3053 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 3054 int state); 3055 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 3056 struct ifla_vf_info *info); 3057 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 3058 struct ifla_vf_stats *stats); 3059 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 3060 int type); 3061 3062 int ib_query_pkey(struct ib_device *device, 3063 u8 port_num, u16 index, u16 *pkey); 3064 3065 int ib_modify_device(struct ib_device *device, 3066 int device_modify_mask, 3067 struct ib_device_modify *device_modify); 3068 3069 int ib_modify_port(struct ib_device *device, 3070 u8 port_num, int port_modify_mask, 3071 struct ib_port_modify *port_modify); 3072 3073 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 3074 u8 *port_num, u16 *index); 3075 3076 int ib_find_pkey(struct ib_device *device, 3077 u8 port_num, u16 pkey, u16 *index); 3078 3079 enum ib_pd_flags { 3080 /* 3081 * Create a memory registration for all memory in the system and place 3082 * the rkey for it into pd->unsafe_global_rkey. 
This can be used by 3083 * ULPs to avoid the overhead of dynamic MRs. 3084 * 3085 * This flag is generally considered unsafe and must only be used in 3086 * extremely trusted environments. Every use of it will log a warning 3087 * in the kernel log. 3088 */ 3089 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 3090 }; 3091 3092 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 3093 const char *caller); 3094 #define ib_alloc_pd(device, flags) \ 3095 __ib_alloc_pd((device), (flags), KBUILD_MODNAME) 3096 void ib_dealloc_pd(struct ib_pd *pd); 3097 3098 /** 3099 * rdma_create_ah - Creates an address handle for the given address vector. 3100 * @pd: The protection domain associated with the address handle. 3101 * @ah_attr: The attributes of the address vector. 3102 * 3103 * The address handle is used to reference a local or global destination 3104 * in all UD QP post sends. 3105 */ 3106 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr); 3107 3108 /** 3109 * rdma_create_user_ah - Creates an address handle for the given address vector. 3110 * It resolves the destination MAC address for an ah attribute of RoCE type. 3111 * @pd: The protection domain associated with the address handle. 3112 * @ah_attr: The attributes of the address vector. 3113 * @udata: pointer to the user's input/output buffer information needed by 3114 * the provider driver. 3115 * 3116 * It returns a valid address handle on success and an appropriate error 3117 * pointer on error. The address handle is used to reference a local or 3118 * global destination in all UD QP post sends. 3119 */ 3120 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, 3121 struct rdma_ah_attr *ah_attr, 3122 struct ib_udata *udata); 3123 /** 3124 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header 3125 * of the incoming packet. 3126 * @hdr: the L3 header to parse 3127 * @net_type: type of header to parse 3128 * @sgid: place to store source gid 3129 * @dgid: place to store destination gid 3130 */ 3131 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 3132 enum rdma_network_type net_type, 3133 union ib_gid *sgid, union ib_gid *dgid); 3134 3135 /** 3136 * ib_get_rdma_header_version - Get the header version 3137 * @hdr: the L3 header to parse 3138 */ 3139 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); 3140 3141 /** 3142 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a 3143 * work completion. 3144 * @device: Device on which the received message arrived. 3145 * @port_num: Port on which the received message arrived. 3146 * @wc: Work completion associated with the received message. 3147 * @grh: References the received global route header. This parameter is 3148 * ignored unless the work completion indicates that the GRH is valid. 3149 * @ah_attr: Returned attributes that can be used when creating an address 3150 * handle for replying to the message. 3151 */ 3152 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, 3153 const struct ib_wc *wc, const struct ib_grh *grh, 3154 struct rdma_ah_attr *ah_attr); 3155 3156 /** 3157 * ib_create_ah_from_wc - Creates an address handle associated with the 3158 * sender of the specified work completion. 3159 * @pd: The protection domain associated with the address handle. 3160 * @wc: Work completion information associated with a received message. 3161 * @grh: References the received global route header. This parameter is 3162 * ignored unless the work completion indicates that the GRH is valid.
3163 * @port_num: The outbound port number to associate with the address. 3164 * 3165 * The address handle is used to reference a local or global destination 3166 * in all UD QP post sends. 3167 */ 3168 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 3169 const struct ib_grh *grh, u8 port_num); 3170 3171 /** 3172 * rdma_modify_ah - Modifies the address vector associated with an address 3173 * handle. 3174 * @ah: The address handle to modify. 3175 * @ah_attr: The new address vector attributes to associate with the 3176 * address handle. 3177 */ 3178 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 3179 3180 /** 3181 * rdma_query_ah - Queries the address vector associated with an address 3182 * handle. 3183 * @ah: The address handle to query. 3184 * @ah_attr: The address vector attributes associated with the address 3185 * handle. 3186 */ 3187 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 3188 3189 /** 3190 * rdma_destroy_ah - Destroys an address handle. 3191 * @ah: The address handle to destroy. 3192 */ 3193 int rdma_destroy_ah(struct ib_ah *ah); 3194 3195 /** 3196 * ib_create_srq - Creates a SRQ associated with the specified protection 3197 * domain. 3198 * @pd: The protection domain associated with the SRQ. 3199 * @srq_init_attr: A list of initial attributes required to create the 3200 * SRQ. If SRQ creation succeeds, then the attributes are updated to 3201 * the actual capabilities of the created SRQ. 3202 * 3203 * srq_attr->max_wr and srq_attr->max_sge are read the determine the 3204 * requested size of the SRQ, and set to the actual values allocated 3205 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 3206 * will always be at least as large as the requested values. 3207 */ 3208 struct ib_srq *ib_create_srq(struct ib_pd *pd, 3209 struct ib_srq_init_attr *srq_init_attr); 3210 3211 /** 3212 * ib_modify_srq - Modifies the attributes for the specified SRQ. 3213 * @srq: The SRQ to modify. 3214 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 3215 * the current values of selected SRQ attributes are returned. 3216 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 3217 * are being modified. 3218 * 3219 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 3220 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 3221 * the number of receives queued drops below the limit. 3222 */ 3223 int ib_modify_srq(struct ib_srq *srq, 3224 struct ib_srq_attr *srq_attr, 3225 enum ib_srq_attr_mask srq_attr_mask); 3226 3227 /** 3228 * ib_query_srq - Returns the attribute list and current values for the 3229 * specified SRQ. 3230 * @srq: The SRQ to query. 3231 * @srq_attr: The attributes of the specified SRQ. 3232 */ 3233 int ib_query_srq(struct ib_srq *srq, 3234 struct ib_srq_attr *srq_attr); 3235 3236 /** 3237 * ib_destroy_srq - Destroys the specified SRQ. 3238 * @srq: The SRQ to destroy. 3239 */ 3240 int ib_destroy_srq(struct ib_srq *srq); 3241 3242 /** 3243 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 3244 * @srq: The SRQ to post the work request on. 3245 * @recv_wr: A list of work requests to post on the receive queue. 3246 * @bad_recv_wr: On an immediate failure, this parameter will reference 3247 * the work request that failed to be posted on the QP. 
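 *
 * A minimal usage sketch (assumes @srq, its parent PD, and a receive buffer
 * already DMA-mapped at dma_addr/len; error handling omitted):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = { .sg_list = &sge, .num_sge = 1 }, *bad_wr;
 *
 *	if (ib_post_srq_recv(srq, &wr, &bad_wr))
 *		... recover; bad_wr points at the request that was not posted ...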
3248 */ 3249 static inline int ib_post_srq_recv(struct ib_srq *srq, 3250 struct ib_recv_wr *recv_wr, 3251 struct ib_recv_wr **bad_recv_wr) 3252 { 3253 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 3254 } 3255 3256 /** 3257 * ib_create_qp - Creates a QP associated with the specified protection 3258 * domain. 3259 * @pd: The protection domain associated with the QP. 3260 * @qp_init_attr: A list of initial attributes required to create the 3261 * QP. If QP creation succeeds, then the attributes are updated to 3262 * the actual capabilities of the created QP. 3263 */ 3264 struct ib_qp *ib_create_qp(struct ib_pd *pd, 3265 struct ib_qp_init_attr *qp_init_attr); 3266 3267 /** 3268 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. 3269 * @qp: The QP to modify. 3270 * @attr: On input, specifies the QP attributes to modify. On output, 3271 * the current values of selected QP attributes are returned. 3272 * @attr_mask: A bit-mask used to specify which attributes of the QP 3273 * are being modified. 3274 * @udata: pointer to user's input output buffer information 3275 * are being modified. 3276 * It returns 0 on success and returns appropriate error code on error. 3277 */ 3278 int ib_modify_qp_with_udata(struct ib_qp *qp, 3279 struct ib_qp_attr *attr, 3280 int attr_mask, 3281 struct ib_udata *udata); 3282 3283 /** 3284 * ib_modify_qp - Modifies the attributes for the specified QP and then 3285 * transitions the QP to the given state. 3286 * @qp: The QP to modify. 3287 * @qp_attr: On input, specifies the QP attributes to modify. On output, 3288 * the current values of selected QP attributes are returned. 3289 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 3290 * are being modified. 3291 */ 3292 int ib_modify_qp(struct ib_qp *qp, 3293 struct ib_qp_attr *qp_attr, 3294 int qp_attr_mask); 3295 3296 /** 3297 * ib_query_qp - Returns the attribute list and current values for the 3298 * specified QP. 3299 * @qp: The QP to query. 3300 * @qp_attr: The attributes of the specified QP. 3301 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 3302 * @qp_init_attr: Additional attributes of the selected QP. 3303 * 3304 * The qp_attr_mask may be used to limit the query to gathering only the 3305 * selected attributes. 3306 */ 3307 int ib_query_qp(struct ib_qp *qp, 3308 struct ib_qp_attr *qp_attr, 3309 int qp_attr_mask, 3310 struct ib_qp_init_attr *qp_init_attr); 3311 3312 /** 3313 * ib_destroy_qp - Destroys the specified QP. 3314 * @qp: The QP to destroy. 3315 */ 3316 int ib_destroy_qp(struct ib_qp *qp); 3317 3318 /** 3319 * ib_open_qp - Obtain a reference to an existing sharable QP. 3320 * @xrcd - XRC domain 3321 * @qp_open_attr: Attributes identifying the QP to open. 3322 * 3323 * Returns a reference to a sharable QP. 3324 */ 3325 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 3326 struct ib_qp_open_attr *qp_open_attr); 3327 3328 /** 3329 * ib_close_qp - Release an external reference to a QP. 3330 * @qp: The QP handle to release 3331 * 3332 * The opened QP handle is released by the caller. The underlying 3333 * shared QP is not destroyed until all internal references are released. 3334 */ 3335 int ib_close_qp(struct ib_qp *qp); 3336 3337 /** 3338 * ib_post_send - Posts a list of work requests to the send queue of 3339 * the specified QP. 3340 * @qp: The QP to post the work request on. 3341 * @send_wr: A list of work requests to post on the send queue. 
3342 * @bad_send_wr: On an immediate failure, this parameter will reference 3343 * the work request that failed to be posted on the QP. 3344 * 3345 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 3346 * error is returned, the QP state shall not be affected, 3347 * ib_post_send() will return an immediate error after queueing any 3348 * earlier work requests in the list. 3349 */ 3350 static inline int ib_post_send(struct ib_qp *qp, 3351 struct ib_send_wr *send_wr, 3352 struct ib_send_wr **bad_send_wr) 3353 { 3354 return qp->device->post_send(qp, send_wr, bad_send_wr); 3355 } 3356 3357 /** 3358 * ib_post_recv - Posts a list of work requests to the receive queue of 3359 * the specified QP. 3360 * @qp: The QP to post the work request on. 3361 * @recv_wr: A list of work requests to post on the receive queue. 3362 * @bad_recv_wr: On an immediate failure, this parameter will reference 3363 * the work request that failed to be posted on the QP. 3364 */ 3365 static inline int ib_post_recv(struct ib_qp *qp, 3366 struct ib_recv_wr *recv_wr, 3367 struct ib_recv_wr **bad_recv_wr) 3368 { 3369 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 3370 } 3371 3372 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, 3373 int nr_cqe, int comp_vector, 3374 enum ib_poll_context poll_ctx, const char *caller); 3375 #define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \ 3376 __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME) 3377 3378 void ib_free_cq(struct ib_cq *cq); 3379 int ib_process_cq_direct(struct ib_cq *cq, int budget); 3380 3381 /** 3382 * ib_create_cq - Creates a CQ on the specified device. 3383 * @device: The device on which to create the CQ. 3384 * @comp_handler: A user-specified callback that is invoked when a 3385 * completion event occurs on the CQ. 3386 * @event_handler: A user-specified callback that is invoked when an 3387 * asynchronous event not associated with a completion occurs on the CQ. 3388 * @cq_context: Context associated with the CQ returned to the user via 3389 * the associated completion and event handlers. 3390 * @cq_attr: The attributes the CQ should be created upon. 3391 * 3392 * Users can examine the cq structure to determine the actual CQ size. 3393 */ 3394 struct ib_cq *__ib_create_cq(struct ib_device *device, 3395 ib_comp_handler comp_handler, 3396 void (*event_handler)(struct ib_event *, void *), 3397 void *cq_context, 3398 const struct ib_cq_init_attr *cq_attr, 3399 const char *caller); 3400 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \ 3401 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME) 3402 3403 /** 3404 * ib_resize_cq - Modifies the capacity of the CQ. 3405 * @cq: The CQ to resize. 3406 * @cqe: The minimum size of the CQ. 3407 * 3408 * Users can examine the cq structure to determine the actual CQ size. 3409 */ 3410 int ib_resize_cq(struct ib_cq *cq, int cqe); 3411 3412 /** 3413 * rdma_set_cq_moderation - Modifies moderation params of the CQ 3414 * @cq: The CQ to modify. 3415 * @cq_count: number of CQEs that will trigger an event 3416 * @cq_period: max period of time in usec before triggering an event 3417 * 3418 */ 3419 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period); 3420 3421 /** 3422 * ib_destroy_cq - Destroys the specified CQ. 3423 * @cq: The CQ to destroy. 
*/ 3425 int ib_destroy_cq(struct ib_cq *cq); 3426 3427 /** 3428 * ib_poll_cq - poll a CQ for completion(s) 3429 * @cq: the CQ being polled 3430 * @num_entries: maximum number of completions to return 3431 * @wc: array of at least @num_entries &struct ib_wc where completions 3432 * will be returned 3433 * 3434 * Poll a CQ for (possibly multiple) completions. If the return value 3435 * is < 0, an error occurred. If the return value is >= 0, it is the 3436 * number of completions returned. If the return value is 3437 * non-negative and < num_entries, then the CQ was emptied. 3438 */ 3439 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 3440 struct ib_wc *wc) 3441 { 3442 return cq->device->poll_cq(cq, num_entries, wc); 3443 } 3444 3445 /** 3446 * ib_req_notify_cq - Request completion notification on a CQ. 3447 * @cq: The CQ to generate an event for. 3448 * @flags: 3449 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 3450 * to request an event on the next solicited event or next work 3451 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 3452 * may also be |ed in to request a hint about missed events, as 3453 * described below. 3454 * 3455 * Return Value: 3456 * < 0 means an error occurred while requesting notification 3457 * == 0 means notification was requested successfully, and if 3458 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 3459 * were missed and it is safe to wait for another event. In 3460 * this case it is guaranteed that any work completions added 3461 * to the CQ since the last CQ poll will trigger a completion 3462 * notification event. 3463 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 3464 * in. It means that the consumer must poll the CQ again to 3465 * make sure it is empty to avoid missing an event because of a 3466 * race between requesting notification and an entry being 3467 * added to the CQ. This return value means it is possible 3468 * (but not guaranteed) that a work completion has been added 3469 * to the CQ since the last poll without triggering a 3470 * completion notification event. 3471 */ 3472 static inline int ib_req_notify_cq(struct ib_cq *cq, 3473 enum ib_cq_notify_flags flags) 3474 { 3475 return cq->device->req_notify_cq(cq, flags); 3476 } 3477 3478 /** 3479 * ib_req_ncomp_notif - Request completion notification when there are 3480 * at least the specified number of unreaped completions on the CQ. 3481 * @cq: The CQ to generate an event for. 3482 * @wc_cnt: The number of unreaped completions that should be on the 3483 * CQ before an event is generated. 3484 */ 3485 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3486 { 3487 return cq->device->req_ncomp_notif ?
3488 cq->device->req_ncomp_notif(cq, wc_cnt) : 3489 -ENOSYS; 3490 } 3491 3492 /** 3493 * ib_dma_mapping_error - check a DMA addr for error 3494 * @dev: The device for which the dma_addr was created 3495 * @dma_addr: The DMA address to check 3496 */ 3497 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3498 { 3499 return dma_mapping_error(dev->dma_device, dma_addr); 3500 } 3501 3502 /** 3503 * ib_dma_map_single - Map a kernel virtual address to DMA address 3504 * @dev: The device for which the dma_addr is to be created 3505 * @cpu_addr: The kernel virtual address 3506 * @size: The size of the region in bytes 3507 * @direction: The direction of the DMA 3508 */ 3509 static inline u64 ib_dma_map_single(struct ib_device *dev, 3510 void *cpu_addr, size_t size, 3511 enum dma_data_direction direction) 3512 { 3513 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3514 } 3515 3516 /** 3517 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 3518 * @dev: The device for which the DMA address was created 3519 * @addr: The DMA address 3520 * @size: The size of the region in bytes 3521 * @direction: The direction of the DMA 3522 */ 3523 static inline void ib_dma_unmap_single(struct ib_device *dev, 3524 u64 addr, size_t size, 3525 enum dma_data_direction direction) 3526 { 3527 dma_unmap_single(dev->dma_device, addr, size, direction); 3528 } 3529 3530 /** 3531 * ib_dma_map_page - Map a physical page to DMA address 3532 * @dev: The device for which the dma_addr is to be created 3533 * @page: The page to be mapped 3534 * @offset: The offset within the page 3535 * @size: The size of the region in bytes 3536 * @direction: The direction of the DMA 3537 */ 3538 static inline u64 ib_dma_map_page(struct ib_device *dev, 3539 struct page *page, 3540 unsigned long offset, 3541 size_t size, 3542 enum dma_data_direction direction) 3543 { 3544 return dma_map_page(dev->dma_device, page, offset, size, direction); 3545 } 3546 3547 /** 3548 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 3549 * @dev: The device for which the DMA address was created 3550 * @addr: The DMA address 3551 * @size: The size of the region in bytes 3552 * @direction: The direction of the DMA 3553 */ 3554 static inline void ib_dma_unmap_page(struct ib_device *dev, 3555 u64 addr, size_t size, 3556 enum dma_data_direction direction) 3557 { 3558 dma_unmap_page(dev->dma_device, addr, size, direction); 3559 } 3560 3561 /** 3562 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 3563 * @dev: The device for which the DMA addresses are to be created 3564 * @sg: The array of scatter/gather entries 3565 * @nents: The number of scatter/gather entries 3566 * @direction: The direction of the DMA 3567 */ 3568 static inline int ib_dma_map_sg(struct ib_device *dev, 3569 struct scatterlist *sg, int nents, 3570 enum dma_data_direction direction) 3571 { 3572 return dma_map_sg(dev->dma_device, sg, nents, direction); 3573 } 3574 3575 /** 3576 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 3577 * @dev: The device for which the DMA addresses were created 3578 * @sg: The array of scatter/gather entries 3579 * @nents: The number of scatter/gather entries 3580 * @direction: The direction of the DMA 3581 */ 3582 static inline void ib_dma_unmap_sg(struct ib_device *dev, 3583 struct scatterlist *sg, int nents, 3584 enum dma_data_direction direction) 3585 { 3586 dma_unmap_sg(dev->dma_device, sg, nents, direction); 3587 } 3588 3589 static inline int 
ib_dma_map_sg_attrs(struct ib_device *dev, 3590 struct scatterlist *sg, int nents, 3591 enum dma_data_direction direction, 3592 unsigned long dma_attrs) 3593 { 3594 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3595 dma_attrs); 3596 } 3597 3598 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3599 struct scatterlist *sg, int nents, 3600 enum dma_data_direction direction, 3601 unsigned long dma_attrs) 3602 { 3603 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); 3604 } 3605 /** 3606 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3607 * @dev: The device for which the DMA addresses were created 3608 * @sg: The scatter/gather entry 3609 * 3610 * Note: this function is obsolete. To do: change all occurrences of 3611 * ib_sg_dma_address() into sg_dma_address(). 3612 */ 3613 static inline u64 ib_sg_dma_address(struct ib_device *dev, 3614 struct scatterlist *sg) 3615 { 3616 return sg_dma_address(sg); 3617 } 3618 3619 /** 3620 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 3621 * @dev: The device for which the DMA addresses were created 3622 * @sg: The scatter/gather entry 3623 * 3624 * Note: this function is obsolete. To do: change all occurrences of 3625 * ib_sg_dma_len() into sg_dma_len(). 3626 */ 3627 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 3628 struct scatterlist *sg) 3629 { 3630 return sg_dma_len(sg); 3631 } 3632 3633 /** 3634 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 3635 * @dev: The device for which the DMA address was created 3636 * @addr: The DMA address 3637 * @size: The size of the region in bytes 3638 * @dir: The direction of the DMA 3639 */ 3640 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 3641 u64 addr, 3642 size_t size, 3643 enum dma_data_direction dir) 3644 { 3645 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3646 } 3647 3648 /** 3649 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 3650 * @dev: The device for which the DMA address was created 3651 * @addr: The DMA address 3652 * @size: The size of the region in bytes 3653 * @dir: The direction of the DMA 3654 */ 3655 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 3656 u64 addr, 3657 size_t size, 3658 enum dma_data_direction dir) 3659 { 3660 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3661 } 3662 3663 /** 3664 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 3665 * @dev: The device for which the DMA address is requested 3666 * @size: The size of the region to allocate in bytes 3667 * @dma_handle: A pointer for returning the DMA address of the region 3668 * @flag: memory allocator flags 3669 */ 3670 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3671 size_t size, 3672 dma_addr_t *dma_handle, 3673 gfp_t flag) 3674 { 3675 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); 3676 } 3677 3678 /** 3679 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 3680 * @dev: The device for which the DMA addresses were allocated 3681 * @size: The size of the region 3682 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 3683 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 3684 */ 3685 static inline void ib_dma_free_coherent(struct ib_device *dev, 3686 size_t size, void *cpu_addr, 3687 dma_addr_t dma_handle) 3688 { 3689 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 3690 } 3691 
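/*
 * Illustrative example (not part of the API): mapping a kernel buffer for a
 * send with the wrappers above. "dev", "buf" and "len" are assumed to be
 * supplied by the caller.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	... build an ib_sge with .addr = dma_addr and post the work request ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */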
3692 /** 3693 * ib_dereg_mr - Deregisters a memory region and removes it from the 3694 * HCA translation table. 3695 * @mr: The memory region to deregister. 3696 * 3697 * This function can fail, if the memory region has memory windows bound to it. 3698 */ 3699 int ib_dereg_mr(struct ib_mr *mr); 3700 3701 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 3702 enum ib_mr_type mr_type, 3703 u32 max_num_sg); 3704 3705 /** 3706 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 3707 * R_Key and L_Key. 3708 * @mr - struct ib_mr pointer to be updated. 3709 * @newkey - new key to be used. 3710 */ 3711 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 3712 { 3713 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 3714 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 3715 } 3716 3717 /** 3718 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 3719 * for calculating a new rkey for type 2 memory windows. 3720 * @rkey - the rkey to increment. 3721 */ 3722 static inline u32 ib_inc_rkey(u32 rkey) 3723 { 3724 const u32 mask = 0x000000ff; 3725 return ((rkey + 1) & mask) | (rkey & ~mask); 3726 } 3727 3728 /** 3729 * ib_alloc_fmr - Allocates a unmapped fast memory region. 3730 * @pd: The protection domain associated with the unmapped region. 3731 * @mr_access_flags: Specifies the memory access rights. 3732 * @fmr_attr: Attributes of the unmapped region. 3733 * 3734 * A fast memory region must be mapped before it can be used as part of 3735 * a work request. 3736 */ 3737 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 3738 int mr_access_flags, 3739 struct ib_fmr_attr *fmr_attr); 3740 3741 /** 3742 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 3743 * @fmr: The fast memory region to associate with the pages. 3744 * @page_list: An array of physical pages to map to the fast memory region. 3745 * @list_len: The number of pages in page_list. 3746 * @iova: The I/O virtual address to use with the mapped region. 3747 */ 3748 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 3749 u64 *page_list, int list_len, 3750 u64 iova) 3751 { 3752 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3753 } 3754 3755 /** 3756 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 3757 * @fmr_list: A linked list of fast memory regions to unmap. 3758 */ 3759 int ib_unmap_fmr(struct list_head *fmr_list); 3760 3761 /** 3762 * ib_dealloc_fmr - Deallocates a fast memory region. 3763 * @fmr: The fast memory region to deallocate. 3764 */ 3765 int ib_dealloc_fmr(struct ib_fmr *fmr); 3766 3767 /** 3768 * ib_attach_mcast - Attaches the specified QP to a multicast group. 3769 * @qp: QP to attach to the multicast group. The QP must be type 3770 * IB_QPT_UD. 3771 * @gid: Multicast group GID. 3772 * @lid: Multicast group LID in host byte order. 3773 * 3774 * In order to send and receive multicast packets, subnet 3775 * administration must have created the multicast group and configured 3776 * the fabric appropriately. The port associated with the specified 3777 * QP must also be a member of the multicast group. 3778 */ 3779 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3780 3781 /** 3782 * ib_detach_mcast - Detaches the specified QP from a multicast group. 3783 * @qp: QP to detach from the multicast group. 3784 * @gid: Multicast group GID. 3785 * @lid: Multicast group LID in host byte order. 
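 *
 * A QP that was joined to the group with ib_attach_mcast() is expected to be
 * detached with the same GID/LID pair, e.g. (sketch; mgid/mlid are
 * illustrative names identifying the multicast group):
 *
 *	ib_attach_mcast(qp, &mgid, mlid);
 *	... send/receive multicast traffic ...
 *	ib_detach_mcast(qp, &mgid, mlid);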

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 * @caller: Module name for kernel consumers
 */
struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
#define ib_alloc_xrcd(device) \
        __ib_alloc_xrcd((device), KBUILD_MODNAME)

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
        /*
         * Local write permission is required if remote write or
         * remote atomic permission is also requested.
         */
        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
            !(flags & IB_ACCESS_LOCAL_WRITE))
                return -EINVAL;

        return 0;
}

static inline bool ib_access_writable(int access_flags)
{
        /*
         * We have writable memory backing the MR if any of the following
         * access flags are set.  "Local write" and "remote write" obviously
         * require write access.  "Remote atomic" can do things like fetch and
         * add, which will modify memory, and "MW bind" can change permissions
         * by binding a window.
         */
        return access_flags &
                (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}
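
/*
 * Example: a minimal sketch of validating MR access flags with the helpers
 * above before a registration request (for example in a driver's
 * ->reg_user_mr() handler).  The ib_example_*() name is illustrative and
 * not part of the verbs API.
 */
static inline int ib_example_check_reg_flags(int access_flags, bool *writable)
{
        int ret;

        /* Remote write/atomic access requires local write as well. */
        ret = ib_check_mr_access(access_flags);
        if (ret)
                return ret;

        /* Tell the caller whether the backing pages may be dirtied. */
        *writable = ib_access_writable(access_flags);
        return 0;
}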

/**
 * ib_check_mr_status - lightweight check of MR status.
 *   This routine may provide status checks on a selected
 *   ib_mr.  The first use case is a signature status check.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *   ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *   Failed checks are indicated in the status bitmask
 *   and the relevant info is placed in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
                       struct ib_mr_status *mr_status);

struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
                                            u16 pkey, const union ib_gid *gid,
                                            const struct sockaddr *addr);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
                           struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
                 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
                                struct ib_rwq_ind_table_init_attr *wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                 unsigned int *sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                  unsigned int *sg_offset, unsigned int page_size)
{
        int n;

        /* Map the SG list, then rebase the MR to a zero-based virtual address. */
        n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
        mr->iova = 0;

        return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
                   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
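
/*
 * Example: a minimal sketch of preparing a fast-registration MR for a
 * DMA-mapped scatterlist.  The caller is assumed to have allocated @mr with
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, ...) and to post the actual
 * IB_WR_REG_MR work request itself; ib_example_*() is an illustrative name,
 * not part of the verbs API.
 */
static inline int ib_example_map_reg_mr(struct ib_mr *mr,
                                        struct scatterlist *sg, int sg_nents)
{
        int n;

        /* Build the MR page list from the DMA-mapped scatterlist. */
        n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
        if (n < sg_nents)
                return n < 0 ? n : -EINVAL;

        /* Bump the key so stale remote references to the old rkey fail. */
        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey) & 0xff);

        return 0;
}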

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
        /* Only RoCE address handles carry a destination MAC. */
        if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
                return attr->roce.dmac;
        return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
                attr->ib.dlid = (u16)dlid;
        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
                return attr->ib.dlid;
        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                return attr->opa.dlid;
        return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
        attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
        return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
                                         u8 src_path_bits)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
                attr->ib.src_path_bits = src_path_bits;
        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
                return attr->ib.src_path_bits;
        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                return attr->opa.src_path_bits;
        return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
                                        bool make_grd)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                return attr->opa.make_grd;
        return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
        attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
        return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
                                           u8 static_rate)
{
        attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
        return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
                                        enum ib_ah_flags flag)
{
        attr->ah_flags = flag;
}

static inline enum ib_ah_flags
rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
        return attr->ah_flags;
}

static inline const struct ib_global_route
*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
        return &attr->grh;
}

/* To retrieve and modify the grh */
static inline struct ib_global_route
*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
        return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

        memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
                                             __be64 prefix)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

        grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
                                            __be64 if_id)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

        grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
                                   union ib_gid *dgid, u32 flow_label,
                                   u8 sgid_index, u8 hop_limit,
                                   u8 traffic_class)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

        attr->ah_flags = IB_AH_GRH;
        if (dgid)
                grh->dgid = *dgid;
        grh->flow_label = flow_label;
        grh->sgid_index = sgid_index;
        grh->hop_limit = hop_limit;
        grh->traffic_class = traffic_class;
}

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
                                                       u8 port_num)
{
        if (rdma_protocol_roce(dev, port_num))
                return RDMA_AH_ATTR_TYPE_ROCE;
        if (rdma_protocol_ib(dev, port_num)) {
                if (rdma_cap_opa_ah(dev, port_num))
                        return RDMA_AH_ATTR_TYPE_OPA;
                return RDMA_AH_ATTR_TYPE_IB;
        }

        return RDMA_AH_ATTR_TYPE_UNDEFINED;
}

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *   In the current implementation the only way to get
 *   the 32bit lid is from other sources for OPA.
 *   For IB, lids will always be 16bits so cast the
 *   value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
        WARN_ON_ONCE(lid & 0xFFFF0000);
        return (u16)lid;
}
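
/*
 * Example: a minimal sketch of filling in an address handle attribute with
 * the accessors above before calling rdma_create_ah().  The chosen DLID, SL
 * and GRH parameters are placeholders, and ib_example_*() is an illustrative
 * name only.
 */
static inline void ib_example_init_ah_attr(struct ib_device *dev, u8 port_num,
                                           u32 dlid, union ib_gid *dgid,
                                           struct rdma_ah_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->type = rdma_ah_find_type(dev, port_num);

        rdma_ah_set_port_num(attr, port_num);
        rdma_ah_set_dlid(attr, dlid);
        rdma_ah_set_sl(attr, 0);

        /* RoCE always needs a GRH; on IB it is optional. */
        if (dgid)
                rdma_ah_set_grh(attr, dgid, 0 /* flow_label */,
                                0 /* sgid_index */, 64 /* hop_limit */,
                                0 /* traffic_class */);
}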

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
        WARN_ON_ONCE(lid & 0xFFFF0000);
        return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL on failure or if the device driver doesn't implement
 * get_vector_affinity; otherwise the CPU map corresponding to the
 * completion vector.
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
        if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
            !device->get_vector_affinity)
                return NULL;

        return device->get_vector_affinity(device, comp_vector);
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 *   and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ibdev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

#endif /* IB_VERBS_H */