1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #if !defined(IB_VERBS_H) 40 #define IB_VERBS_H 41 42 #include <linux/types.h> 43 #include <linux/device.h> 44 #include <linux/dma-mapping.h> 45 #include <linux/kref.h> 46 #include <linux/list.h> 47 #include <linux/rwsem.h> 48 #include <linux/workqueue.h> 49 #include <linux/irq_poll.h> 50 #include <uapi/linux/if_ether.h> 51 #include <net/ipv6.h> 52 #include <net/ip.h> 53 #include <linux/string.h> 54 #include <linux/slab.h> 55 #include <linux/netdevice.h> 56 #include <linux/refcount.h> 57 #include <linux/if_link.h> 58 #include <linux/atomic.h> 59 #include <linux/mmu_notifier.h> 60 #include <linux/uaccess.h> 61 #include <linux/cgroup_rdma.h> 62 #include <uapi/rdma/ib_user_verbs.h> 63 #include <rdma/restrack.h> 64 #include <uapi/rdma/rdma_user_ioctl.h> 65 #include <uapi/rdma/ib_user_ioctl_verbs.h> 66 67 #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN 68 69 struct ib_umem_odp; 70 71 extern struct workqueue_struct *ib_wq; 72 extern struct workqueue_struct *ib_comp_wq; 73 extern struct workqueue_struct *ib_comp_unbound_wq; 74 75 union ib_gid { 76 u8 raw[16]; 77 struct { 78 __be64 subnet_prefix; 79 __be64 interface_id; 80 } global; 81 }; 82 83 extern union ib_gid zgid; 84 85 enum ib_gid_type { 86 /* If link layer is Ethernet, this is RoCE V1 */ 87 IB_GID_TYPE_IB = 0, 88 IB_GID_TYPE_ROCE = 0, 89 IB_GID_TYPE_ROCE_UDP_ENCAP = 1, 90 IB_GID_TYPE_SIZE 91 }; 92 93 #define ROCE_V2_UDP_DPORT 4791 94 struct ib_gid_attr { 95 struct net_device *ndev; 96 struct ib_device *device; 97 union ib_gid gid; 98 enum ib_gid_type gid_type; 99 u16 index; 100 u8 port_num; 101 }; 102 103 enum rdma_node_type { 104 /* IB values map to NodeInfo:NodeType. 
*/ 105 RDMA_NODE_IB_CA = 1, 106 RDMA_NODE_IB_SWITCH, 107 RDMA_NODE_IB_ROUTER, 108 RDMA_NODE_RNIC, 109 RDMA_NODE_USNIC, 110 RDMA_NODE_USNIC_UDP, 111 }; 112 113 enum { 114 /* set the local administered indication */ 115 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, 116 }; 117 118 enum rdma_transport_type { 119 RDMA_TRANSPORT_IB, 120 RDMA_TRANSPORT_IWARP, 121 RDMA_TRANSPORT_USNIC, 122 RDMA_TRANSPORT_USNIC_UDP 123 }; 124 125 enum rdma_protocol_type { 126 RDMA_PROTOCOL_IB, 127 RDMA_PROTOCOL_IBOE, 128 RDMA_PROTOCOL_IWARP, 129 RDMA_PROTOCOL_USNIC_UDP 130 }; 131 132 __attribute_const__ enum rdma_transport_type 133 rdma_node_get_transport(enum rdma_node_type node_type); 134 135 enum rdma_network_type { 136 RDMA_NETWORK_IB, 137 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, 138 RDMA_NETWORK_IPV4, 139 RDMA_NETWORK_IPV6 140 }; 141 142 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type) 143 { 144 if (network_type == RDMA_NETWORK_IPV4 || 145 network_type == RDMA_NETWORK_IPV6) 146 return IB_GID_TYPE_ROCE_UDP_ENCAP; 147 148 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ 149 return IB_GID_TYPE_IB; 150 } 151 152 static inline enum rdma_network_type 153 rdma_gid_attr_network_type(const struct ib_gid_attr *attr) 154 { 155 if (attr->gid_type == IB_GID_TYPE_IB) 156 return RDMA_NETWORK_IB; 157 158 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid)) 159 return RDMA_NETWORK_IPV4; 160 else 161 return RDMA_NETWORK_IPV6; 162 } 163 164 enum rdma_link_layer { 165 IB_LINK_LAYER_UNSPECIFIED, 166 IB_LINK_LAYER_INFINIBAND, 167 IB_LINK_LAYER_ETHERNET, 168 }; 169 170 enum ib_device_cap_flags { 171 IB_DEVICE_RESIZE_MAX_WR = (1 << 0), 172 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1), 173 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2), 174 IB_DEVICE_RAW_MULTI = (1 << 3), 175 IB_DEVICE_AUTO_PATH_MIG = (1 << 4), 176 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5), 177 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), 178 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), 179 IB_DEVICE_SHUTDOWN_PORT = (1 << 8), 180 /* Not in use, former INIT_TYPE = (1 << 9),*/ 181 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), 182 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), 183 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), 184 IB_DEVICE_SRQ_RESIZE = (1 << 13), 185 IB_DEVICE_N_NOTIFY_CQ = (1 << 14), 186 187 /* 188 * This device supports a per-device lkey or stag that can be 189 * used without performing a memory registration for the local 190 * memory. Note that ULPs should never check this flag, but 191 * instead of use the local_dma_lkey flag in the ib_pd structure, 192 * which will always contain a usable lkey. 193 */ 194 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), 195 /* Reserved, old SEND_W_INV = (1 << 16),*/ 196 IB_DEVICE_MEM_WINDOW = (1 << 17), 197 /* 198 * Devices should set IB_DEVICE_UD_IP_SUM if they support 199 * insertion of UDP and TCP checksum on outgoing UD IPoIB 200 * messages and can verify the validity of checksum for 201 * incoming messages. Setting this flag implies that the 202 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 203 */ 204 IB_DEVICE_UD_IP_CSUM = (1 << 18), 205 IB_DEVICE_UD_TSO = (1 << 19), 206 IB_DEVICE_XRC = (1 << 20), 207 208 /* 209 * This device supports the IB "base memory management extension", 210 * which includes support for fast registrations (IB_WR_REG_MR, 211 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should 212 * also be set by any iWarp device which must support FRs to comply 213 * to the iWarp verbs spec. iWarp devices also support the 214 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the 215 * stag. 
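 *
 * A minimal fast-registration sketch (illustrative only; assumes a
 * connected QP "qp", an MR obtained from
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents), and a DMA-mapped
 * scatterlist "sg" with "nents" entries; error handling omitted):
 *
 *	struct ib_reg_wr reg_wr = {};
 *
 *	ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ib_post_send(qp, &reg_wr.wr, NULL);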
216 */ 217 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), 218 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), 219 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), 220 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), 221 IB_DEVICE_RC_IP_CSUM = (1 << 25), 222 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */ 223 IB_DEVICE_RAW_IP_CSUM = (1 << 26), 224 /* 225 * Devices should set IB_DEVICE_CROSS_CHANNEL if they 226 * support execution of WQEs that involve synchronization 227 * of I/O operations with single completion queue managed 228 * by hardware. 229 */ 230 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 231 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 232 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 233 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), 234 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 235 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), 236 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ 237 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), 238 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35), 239 /* The device supports padding incoming writes to cacheline. */ 240 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36), 241 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37), 242 }; 243 244 enum ib_signature_prot_cap { 245 IB_PROT_T10DIF_TYPE_1 = 1, 246 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 247 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 248 }; 249 250 enum ib_signature_guard_cap { 251 IB_GUARD_T10DIF_CRC = 1, 252 IB_GUARD_T10DIF_CSUM = 1 << 1, 253 }; 254 255 enum ib_atomic_cap { 256 IB_ATOMIC_NONE, 257 IB_ATOMIC_HCA, 258 IB_ATOMIC_GLOB 259 }; 260 261 enum ib_odp_general_cap_bits { 262 IB_ODP_SUPPORT = 1 << 0, 263 IB_ODP_SUPPORT_IMPLICIT = 1 << 1, 264 }; 265 266 enum ib_odp_transport_cap_bits { 267 IB_ODP_SUPPORT_SEND = 1 << 0, 268 IB_ODP_SUPPORT_RECV = 1 << 1, 269 IB_ODP_SUPPORT_WRITE = 1 << 2, 270 IB_ODP_SUPPORT_READ = 1 << 3, 271 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 272 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5, 273 }; 274 275 struct ib_odp_caps { 276 uint64_t general_caps; 277 struct { 278 uint32_t rc_odp_caps; 279 uint32_t uc_odp_caps; 280 uint32_t ud_odp_caps; 281 uint32_t xrc_odp_caps; 282 } per_transport_caps; 283 }; 284 285 struct ib_rss_caps { 286 /* Corresponding bit will be set if qp type from 287 * 'enum ib_qp_type' is supported, e.g. 
288 * supported_qpts |= 1 << IB_QPT_UD 289 */ 290 u32 supported_qpts; 291 u32 max_rwq_indirection_tables; 292 u32 max_rwq_indirection_table_size; 293 }; 294 295 enum ib_tm_cap_flags { 296 /* Support tag matching on RC transport */ 297 IB_TM_CAP_RC = 1 << 0, 298 }; 299 300 struct ib_tm_caps { 301 /* Max size of RNDV header */ 302 u32 max_rndv_hdr_size; 303 /* Max number of entries in tag matching list */ 304 u32 max_num_tags; 305 /* From enum ib_tm_cap_flags */ 306 u32 flags; 307 /* Max number of outstanding list operations */ 308 u32 max_ops; 309 /* Max number of SGE in tag matching entry */ 310 u32 max_sge; 311 }; 312 313 struct ib_cq_init_attr { 314 unsigned int cqe; 315 int comp_vector; 316 u32 flags; 317 }; 318 319 enum ib_cq_attr_mask { 320 IB_CQ_MODERATE = 1 << 0, 321 }; 322 323 struct ib_cq_caps { 324 u16 max_cq_moderation_count; 325 u16 max_cq_moderation_period; 326 }; 327 328 struct ib_dm_mr_attr { 329 u64 length; 330 u64 offset; 331 u32 access_flags; 332 }; 333 334 struct ib_dm_alloc_attr { 335 u64 length; 336 u32 alignment; 337 u32 flags; 338 }; 339 340 struct ib_device_attr { 341 u64 fw_ver; 342 __be64 sys_image_guid; 343 u64 max_mr_size; 344 u64 page_size_cap; 345 u32 vendor_id; 346 u32 vendor_part_id; 347 u32 hw_ver; 348 int max_qp; 349 int max_qp_wr; 350 u64 device_cap_flags; 351 int max_send_sge; 352 int max_recv_sge; 353 int max_sge_rd; 354 int max_cq; 355 int max_cqe; 356 int max_mr; 357 int max_pd; 358 int max_qp_rd_atom; 359 int max_ee_rd_atom; 360 int max_res_rd_atom; 361 int max_qp_init_rd_atom; 362 int max_ee_init_rd_atom; 363 enum ib_atomic_cap atomic_cap; 364 enum ib_atomic_cap masked_atomic_cap; 365 int max_ee; 366 int max_rdd; 367 int max_mw; 368 int max_raw_ipv6_qp; 369 int max_raw_ethy_qp; 370 int max_mcast_grp; 371 int max_mcast_qp_attach; 372 int max_total_mcast_qp_attach; 373 int max_ah; 374 int max_fmr; 375 int max_map_per_fmr; 376 int max_srq; 377 int max_srq_wr; 378 int max_srq_sge; 379 unsigned int max_fast_reg_page_list_len; 380 u16 max_pkeys; 381 u8 local_ca_ack_delay; 382 int sig_prot_cap; 383 int sig_guard_cap; 384 struct ib_odp_caps odp_caps; 385 uint64_t timestamp_mask; 386 uint64_t hca_core_clock; /* in KHZ */ 387 struct ib_rss_caps rss_caps; 388 u32 max_wq_type_rq; 389 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ 390 struct ib_tm_caps tm_caps; 391 struct ib_cq_caps cq_caps; 392 u64 max_dm_size; 393 }; 394 395 enum ib_mtu { 396 IB_MTU_256 = 1, 397 IB_MTU_512 = 2, 398 IB_MTU_1024 = 3, 399 IB_MTU_2048 = 4, 400 IB_MTU_4096 = 5 401 }; 402 403 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 404 { 405 switch (mtu) { 406 case IB_MTU_256: return 256; 407 case IB_MTU_512: return 512; 408 case IB_MTU_1024: return 1024; 409 case IB_MTU_2048: return 2048; 410 case IB_MTU_4096: return 4096; 411 default: return -1; 412 } 413 } 414 415 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu) 416 { 417 if (mtu >= 4096) 418 return IB_MTU_4096; 419 else if (mtu >= 2048) 420 return IB_MTU_2048; 421 else if (mtu >= 1024) 422 return IB_MTU_1024; 423 else if (mtu >= 512) 424 return IB_MTU_512; 425 else 426 return IB_MTU_256; 427 } 428 429 enum ib_port_state { 430 IB_PORT_NOP = 0, 431 IB_PORT_DOWN = 1, 432 IB_PORT_INIT = 2, 433 IB_PORT_ARMED = 3, 434 IB_PORT_ACTIVE = 4, 435 IB_PORT_ACTIVE_DEFER = 5 436 }; 437 438 enum ib_port_width { 439 IB_WIDTH_1X = 1, 440 IB_WIDTH_2X = 16, 441 IB_WIDTH_4X = 2, 442 IB_WIDTH_8X = 4, 443 IB_WIDTH_12X = 8 444 }; 445 446 static inline int ib_width_enum_to_int(enum ib_port_width width) 447 { 448 switch (width) { 449 case 
IB_WIDTH_1X: return 1;
	case IB_WIDTH_2X: return 2;
	case IB_WIDTH_4X: return 4;
	case IB_WIDTH_8X: return 8;
	case IB_WIDTH_12X: return 12;
	default: return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64 bits and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}


/* Define bits for the various functionality this port needs to be supported by
 * the core.
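 *
 * For example, a RoCEv2 provider's get_port_immutable() callback would
 * typically advertise (a sketch, not a requirement of any particular driver;
 * "immutable" is the struct ib_port_immutable the core passes to that
 * callback):
 *
 *	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
 *
 * while an iWARP port would advertise RDMA_CORE_PORT_IWARP instead.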
524 */ 525 /* Management 0x00000FFF */ 526 #define RDMA_CORE_CAP_IB_MAD 0x00000001 527 #define RDMA_CORE_CAP_IB_SMI 0x00000002 528 #define RDMA_CORE_CAP_IB_CM 0x00000004 529 #define RDMA_CORE_CAP_IW_CM 0x00000008 530 #define RDMA_CORE_CAP_IB_SA 0x00000010 531 #define RDMA_CORE_CAP_OPA_MAD 0x00000020 532 533 /* Address format 0x000FF000 */ 534 #define RDMA_CORE_CAP_AF_IB 0x00001000 535 #define RDMA_CORE_CAP_ETH_AH 0x00002000 536 #define RDMA_CORE_CAP_OPA_AH 0x00004000 537 #define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000 538 539 /* Protocol 0xFFF00000 */ 540 #define RDMA_CORE_CAP_PROT_IB 0x00100000 541 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 542 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 543 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 544 #define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000 545 #define RDMA_CORE_CAP_PROT_USNIC 0x02000000 546 547 #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \ 548 | RDMA_CORE_CAP_PROT_ROCE \ 549 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP) 550 551 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 552 | RDMA_CORE_CAP_IB_MAD \ 553 | RDMA_CORE_CAP_IB_SMI \ 554 | RDMA_CORE_CAP_IB_CM \ 555 | RDMA_CORE_CAP_IB_SA \ 556 | RDMA_CORE_CAP_AF_IB) 557 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 558 | RDMA_CORE_CAP_IB_MAD \ 559 | RDMA_CORE_CAP_IB_CM \ 560 | RDMA_CORE_CAP_AF_IB \ 561 | RDMA_CORE_CAP_ETH_AH) 562 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ 563 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ 564 | RDMA_CORE_CAP_IB_MAD \ 565 | RDMA_CORE_CAP_IB_CM \ 566 | RDMA_CORE_CAP_AF_IB \ 567 | RDMA_CORE_CAP_ETH_AH) 568 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 569 | RDMA_CORE_CAP_IW_CM) 570 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 571 | RDMA_CORE_CAP_OPA_MAD) 572 573 #define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET) 574 575 #define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC) 576 577 struct ib_port_attr { 578 u64 subnet_prefix; 579 enum ib_port_state state; 580 enum ib_mtu max_mtu; 581 enum ib_mtu active_mtu; 582 int gid_tbl_len; 583 unsigned int ip_gids:1; 584 /* This is the value from PortInfo CapabilityMask, defined by IBA */ 585 u32 port_cap_flags; 586 u32 max_msg_sz; 587 u32 bad_pkey_cntr; 588 u32 qkey_viol_cntr; 589 u16 pkey_tbl_len; 590 u32 sm_lid; 591 u32 lid; 592 u8 lmc; 593 u8 max_vl_num; 594 u8 sm_sl; 595 u8 subnet_timeout; 596 u8 init_type_reply; 597 u8 active_width; 598 u8 active_speed; 599 u8 phys_state; 600 u16 port_cap_flags2; 601 }; 602 603 enum ib_device_modify_flags { 604 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 605 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 606 }; 607 608 #define IB_DEVICE_NODE_DESC_MAX 64 609 610 struct ib_device_modify { 611 u64 sys_image_guid; 612 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 613 }; 614 615 enum ib_port_modify_flags { 616 IB_PORT_SHUTDOWN = 1, 617 IB_PORT_INIT_TYPE = (1<<2), 618 IB_PORT_RESET_QKEY_CNTR = (1<<3), 619 IB_PORT_OPA_MASK_CHG = (1<<4) 620 }; 621 622 struct ib_port_modify { 623 u32 set_port_cap_mask; 624 u32 clr_port_cap_mask; 625 u8 init_type; 626 }; 627 628 enum ib_event_type { 629 IB_EVENT_CQ_ERR, 630 IB_EVENT_QP_FATAL, 631 IB_EVENT_QP_REQ_ERR, 632 IB_EVENT_QP_ACCESS_ERR, 633 IB_EVENT_COMM_EST, 634 IB_EVENT_SQ_DRAINED, 635 IB_EVENT_PATH_MIG, 636 IB_EVENT_PATH_MIG_ERR, 637 IB_EVENT_DEVICE_FATAL, 638 IB_EVENT_PORT_ACTIVE, 639 IB_EVENT_PORT_ERR, 640 IB_EVENT_LID_CHANGE, 641 IB_EVENT_PKEY_CHANGE, 642 IB_EVENT_SM_CHANGE, 643 IB_EVENT_SRQ_ERR, 644 IB_EVENT_SRQ_LIMIT_REACHED, 645 IB_EVENT_QP_LAST_WQE_REACHED, 646 
IB_EVENT_CLIENT_REREGISTER, 647 IB_EVENT_GID_CHANGE, 648 IB_EVENT_WQ_FATAL, 649 }; 650 651 const char *__attribute_const__ ib_event_msg(enum ib_event_type event); 652 653 struct ib_event { 654 struct ib_device *device; 655 union { 656 struct ib_cq *cq; 657 struct ib_qp *qp; 658 struct ib_srq *srq; 659 struct ib_wq *wq; 660 u8 port_num; 661 } element; 662 enum ib_event_type event; 663 }; 664 665 struct ib_event_handler { 666 struct ib_device *device; 667 void (*handler)(struct ib_event_handler *, struct ib_event *); 668 struct list_head list; 669 }; 670 671 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 672 do { \ 673 (_ptr)->device = _device; \ 674 (_ptr)->handler = _handler; \ 675 INIT_LIST_HEAD(&(_ptr)->list); \ 676 } while (0) 677 678 struct ib_global_route { 679 const struct ib_gid_attr *sgid_attr; 680 union ib_gid dgid; 681 u32 flow_label; 682 u8 sgid_index; 683 u8 hop_limit; 684 u8 traffic_class; 685 }; 686 687 struct ib_grh { 688 __be32 version_tclass_flow; 689 __be16 paylen; 690 u8 next_hdr; 691 u8 hop_limit; 692 union ib_gid sgid; 693 union ib_gid dgid; 694 }; 695 696 union rdma_network_hdr { 697 struct ib_grh ibgrh; 698 struct { 699 /* The IB spec states that if it's IPv4, the header 700 * is located in the last 20 bytes of the header. 701 */ 702 u8 reserved[20]; 703 struct iphdr roce4grh; 704 }; 705 }; 706 707 #define IB_QPN_MASK 0xFFFFFF 708 709 enum { 710 IB_MULTICAST_QPN = 0xffffff 711 }; 712 713 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 714 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) 715 716 enum ib_ah_flags { 717 IB_AH_GRH = 1 718 }; 719 720 enum ib_rate { 721 IB_RATE_PORT_CURRENT = 0, 722 IB_RATE_2_5_GBPS = 2, 723 IB_RATE_5_GBPS = 5, 724 IB_RATE_10_GBPS = 3, 725 IB_RATE_20_GBPS = 6, 726 IB_RATE_30_GBPS = 4, 727 IB_RATE_40_GBPS = 7, 728 IB_RATE_60_GBPS = 8, 729 IB_RATE_80_GBPS = 9, 730 IB_RATE_120_GBPS = 10, 731 IB_RATE_14_GBPS = 11, 732 IB_RATE_56_GBPS = 12, 733 IB_RATE_112_GBPS = 13, 734 IB_RATE_168_GBPS = 14, 735 IB_RATE_25_GBPS = 15, 736 IB_RATE_100_GBPS = 16, 737 IB_RATE_200_GBPS = 17, 738 IB_RATE_300_GBPS = 18, 739 IB_RATE_28_GBPS = 19, 740 IB_RATE_50_GBPS = 20, 741 IB_RATE_400_GBPS = 21, 742 IB_RATE_600_GBPS = 22, 743 }; 744 745 /** 746 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 747 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 748 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 749 * @rate: rate to convert. 750 */ 751 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 752 753 /** 754 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 755 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 756 * @rate: rate to convert. 757 */ 758 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 759 760 761 /** 762 * enum ib_mr_type - memory region type 763 * @IB_MR_TYPE_MEM_REG: memory region that is used for 764 * normal registration 765 * @IB_MR_TYPE_SIGNATURE: memory region that is used for 766 * signature operations (data-integrity 767 * capable regions) 768 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to 769 * register any arbitrary sg lists (without 770 * the normal mr constraints - see 771 * ib_map_mr_sg) 772 */ 773 enum ib_mr_type { 774 IB_MR_TYPE_MEM_REG, 775 IB_MR_TYPE_SIGNATURE, 776 IB_MR_TYPE_SG_GAPS, 777 }; 778 779 /** 780 * Signature types 781 * IB_SIG_TYPE_NONE: Unprotected. 
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * Signature check masks (8 bytes in total) according to the T10-PI standard:
 *  -------- -------- ------------
 * | GUARD  | APPTAG |   REFTAG   |
 * |  2B    |  2B    |    4B      |
 *  -------- -------- ------------
 */
enum {
	IB_SIG_CHECK_GUARD	= 0xc0,
	IB_SIG_CHECK_APPTAG	= 0x30,
	IB_SIG_CHECK_REFTAG	= 0x0f,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
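 *
 * For example, mult_to_ib_rate(2) yields IB_RATE_5_GBPS, mirroring
 * ib_rate_to_mult() above.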
900 */ 901 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); 902 903 enum rdma_ah_attr_type { 904 RDMA_AH_ATTR_TYPE_UNDEFINED, 905 RDMA_AH_ATTR_TYPE_IB, 906 RDMA_AH_ATTR_TYPE_ROCE, 907 RDMA_AH_ATTR_TYPE_OPA, 908 }; 909 910 struct ib_ah_attr { 911 u16 dlid; 912 u8 src_path_bits; 913 }; 914 915 struct roce_ah_attr { 916 u8 dmac[ETH_ALEN]; 917 }; 918 919 struct opa_ah_attr { 920 u32 dlid; 921 u8 src_path_bits; 922 bool make_grd; 923 }; 924 925 struct rdma_ah_attr { 926 struct ib_global_route grh; 927 u8 sl; 928 u8 static_rate; 929 u8 port_num; 930 u8 ah_flags; 931 enum rdma_ah_attr_type type; 932 union { 933 struct ib_ah_attr ib; 934 struct roce_ah_attr roce; 935 struct opa_ah_attr opa; 936 }; 937 }; 938 939 enum ib_wc_status { 940 IB_WC_SUCCESS, 941 IB_WC_LOC_LEN_ERR, 942 IB_WC_LOC_QP_OP_ERR, 943 IB_WC_LOC_EEC_OP_ERR, 944 IB_WC_LOC_PROT_ERR, 945 IB_WC_WR_FLUSH_ERR, 946 IB_WC_MW_BIND_ERR, 947 IB_WC_BAD_RESP_ERR, 948 IB_WC_LOC_ACCESS_ERR, 949 IB_WC_REM_INV_REQ_ERR, 950 IB_WC_REM_ACCESS_ERR, 951 IB_WC_REM_OP_ERR, 952 IB_WC_RETRY_EXC_ERR, 953 IB_WC_RNR_RETRY_EXC_ERR, 954 IB_WC_LOC_RDD_VIOL_ERR, 955 IB_WC_REM_INV_RD_REQ_ERR, 956 IB_WC_REM_ABORT_ERR, 957 IB_WC_INV_EECN_ERR, 958 IB_WC_INV_EEC_STATE_ERR, 959 IB_WC_FATAL_ERR, 960 IB_WC_RESP_TIMEOUT_ERR, 961 IB_WC_GENERAL_ERR 962 }; 963 964 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); 965 966 enum ib_wc_opcode { 967 IB_WC_SEND, 968 IB_WC_RDMA_WRITE, 969 IB_WC_RDMA_READ, 970 IB_WC_COMP_SWAP, 971 IB_WC_FETCH_ADD, 972 IB_WC_LSO, 973 IB_WC_LOCAL_INV, 974 IB_WC_REG_MR, 975 IB_WC_MASKED_COMP_SWAP, 976 IB_WC_MASKED_FETCH_ADD, 977 /* 978 * Set value of IB_WC_RECV so consumers can test if a completion is a 979 * receive by testing (opcode & IB_WC_RECV). 980 */ 981 IB_WC_RECV = 1 << 7, 982 IB_WC_RECV_RDMA_WITH_IMM 983 }; 984 985 enum ib_wc_flags { 986 IB_WC_GRH = 1, 987 IB_WC_WITH_IMM = (1<<1), 988 IB_WC_WITH_INVALIDATE = (1<<2), 989 IB_WC_IP_CSUM_OK = (1<<3), 990 IB_WC_WITH_SMAC = (1<<4), 991 IB_WC_WITH_VLAN = (1<<5), 992 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6), 993 }; 994 995 struct ib_wc { 996 union { 997 u64 wr_id; 998 struct ib_cqe *wr_cqe; 999 }; 1000 enum ib_wc_status status; 1001 enum ib_wc_opcode opcode; 1002 u32 vendor_err; 1003 u32 byte_len; 1004 struct ib_qp *qp; 1005 union { 1006 __be32 imm_data; 1007 u32 invalidate_rkey; 1008 } ex; 1009 u32 src_qp; 1010 u32 slid; 1011 int wc_flags; 1012 u16 pkey_index; 1013 u8 sl; 1014 u8 dlid_path_bits; 1015 u8 port_num; /* valid only for DR SMPs on switches */ 1016 u8 smac[ETH_ALEN]; 1017 u16 vlan_id; 1018 u8 network_hdr_type; 1019 }; 1020 1021 enum ib_cq_notify_flags { 1022 IB_CQ_SOLICITED = 1 << 0, 1023 IB_CQ_NEXT_COMP = 1 << 1, 1024 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 1025 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 1026 }; 1027 1028 enum ib_srq_type { 1029 IB_SRQT_BASIC, 1030 IB_SRQT_XRC, 1031 IB_SRQT_TM, 1032 }; 1033 1034 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type) 1035 { 1036 return srq_type == IB_SRQT_XRC || 1037 srq_type == IB_SRQT_TM; 1038 } 1039 1040 enum ib_srq_attr_mask { 1041 IB_SRQ_MAX_WR = 1 << 0, 1042 IB_SRQ_LIMIT = 1 << 1, 1043 }; 1044 1045 struct ib_srq_attr { 1046 u32 max_wr; 1047 u32 max_sge; 1048 u32 srq_limit; 1049 }; 1050 1051 struct ib_srq_init_attr { 1052 void (*event_handler)(struct ib_event *, void *); 1053 void *srq_context; 1054 struct ib_srq_attr attr; 1055 enum ib_srq_type srq_type; 1056 1057 struct { 1058 struct ib_cq *cq; 1059 union { 1060 struct { 1061 struct ib_xrcd *xrcd; 1062 } xrc; 1063 1064 struct { 1065 
u32 max_num_tags; 1066 } tag_matching; 1067 }; 1068 } ext; 1069 }; 1070 1071 struct ib_qp_cap { 1072 u32 max_send_wr; 1073 u32 max_recv_wr; 1074 u32 max_send_sge; 1075 u32 max_recv_sge; 1076 u32 max_inline_data; 1077 1078 /* 1079 * Maximum number of rdma_rw_ctx structures in flight at a time. 1080 * ib_create_qp() will calculate the right amount of neededed WRs 1081 * and MRs based on this. 1082 */ 1083 u32 max_rdma_ctxs; 1084 }; 1085 1086 enum ib_sig_type { 1087 IB_SIGNAL_ALL_WR, 1088 IB_SIGNAL_REQ_WR 1089 }; 1090 1091 enum ib_qp_type { 1092 /* 1093 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 1094 * here (and in that order) since the MAD layer uses them as 1095 * indices into a 2-entry table. 1096 */ 1097 IB_QPT_SMI, 1098 IB_QPT_GSI, 1099 1100 IB_QPT_RC, 1101 IB_QPT_UC, 1102 IB_QPT_UD, 1103 IB_QPT_RAW_IPV6, 1104 IB_QPT_RAW_ETHERTYPE, 1105 IB_QPT_RAW_PACKET = 8, 1106 IB_QPT_XRC_INI = 9, 1107 IB_QPT_XRC_TGT, 1108 IB_QPT_MAX, 1109 IB_QPT_DRIVER = 0xFF, 1110 /* Reserve a range for qp types internal to the low level driver. 1111 * These qp types will not be visible at the IB core layer, so the 1112 * IB_QPT_MAX usages should not be affected in the core layer 1113 */ 1114 IB_QPT_RESERVED1 = 0x1000, 1115 IB_QPT_RESERVED2, 1116 IB_QPT_RESERVED3, 1117 IB_QPT_RESERVED4, 1118 IB_QPT_RESERVED5, 1119 IB_QPT_RESERVED6, 1120 IB_QPT_RESERVED7, 1121 IB_QPT_RESERVED8, 1122 IB_QPT_RESERVED9, 1123 IB_QPT_RESERVED10, 1124 }; 1125 1126 enum ib_qp_create_flags { 1127 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 1128 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 1129 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, 1130 IB_QP_CREATE_MANAGED_SEND = 1 << 3, 1131 IB_QP_CREATE_MANAGED_RECV = 1 << 4, 1132 IB_QP_CREATE_NETIF_QP = 1 << 5, 1133 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 1134 /* FREE = 1 << 7, */ 1135 IB_QP_CREATE_SCATTER_FCS = 1 << 8, 1136 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, 1137 IB_QP_CREATE_SOURCE_QPN = 1 << 10, 1138 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11, 1139 /* reserve bits 26-31 for low level drivers' internal use */ 1140 IB_QP_CREATE_RESERVED_START = 1 << 26, 1141 IB_QP_CREATE_RESERVED_END = 1 << 31, 1142 }; 1143 1144 /* 1145 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 1146 * callback to destroy the passed in QP. 1147 */ 1148 1149 struct ib_qp_init_attr { 1150 /* Consumer's event_handler callback must not block */ 1151 void (*event_handler)(struct ib_event *, void *); 1152 1153 void *qp_context; 1154 struct ib_cq *send_cq; 1155 struct ib_cq *recv_cq; 1156 struct ib_srq *srq; 1157 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1158 struct ib_qp_cap cap; 1159 enum ib_sig_type sq_sig_type; 1160 enum ib_qp_type qp_type; 1161 u32 create_flags; 1162 1163 /* 1164 * Only needed for special QP types, or when using the RW API. 
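 * (special QP types such as IB_QPT_SMI and IB_QPT_GSI are created per port;
 * the RW API sizes the QP from cap.max_rdma_ctxs above).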
1165 */ 1166 u8 port_num; 1167 struct ib_rwq_ind_table *rwq_ind_tbl; 1168 u32 source_qpn; 1169 }; 1170 1171 struct ib_qp_open_attr { 1172 void (*event_handler)(struct ib_event *, void *); 1173 void *qp_context; 1174 u32 qp_num; 1175 enum ib_qp_type qp_type; 1176 }; 1177 1178 enum ib_rnr_timeout { 1179 IB_RNR_TIMER_655_36 = 0, 1180 IB_RNR_TIMER_000_01 = 1, 1181 IB_RNR_TIMER_000_02 = 2, 1182 IB_RNR_TIMER_000_03 = 3, 1183 IB_RNR_TIMER_000_04 = 4, 1184 IB_RNR_TIMER_000_06 = 5, 1185 IB_RNR_TIMER_000_08 = 6, 1186 IB_RNR_TIMER_000_12 = 7, 1187 IB_RNR_TIMER_000_16 = 8, 1188 IB_RNR_TIMER_000_24 = 9, 1189 IB_RNR_TIMER_000_32 = 10, 1190 IB_RNR_TIMER_000_48 = 11, 1191 IB_RNR_TIMER_000_64 = 12, 1192 IB_RNR_TIMER_000_96 = 13, 1193 IB_RNR_TIMER_001_28 = 14, 1194 IB_RNR_TIMER_001_92 = 15, 1195 IB_RNR_TIMER_002_56 = 16, 1196 IB_RNR_TIMER_003_84 = 17, 1197 IB_RNR_TIMER_005_12 = 18, 1198 IB_RNR_TIMER_007_68 = 19, 1199 IB_RNR_TIMER_010_24 = 20, 1200 IB_RNR_TIMER_015_36 = 21, 1201 IB_RNR_TIMER_020_48 = 22, 1202 IB_RNR_TIMER_030_72 = 23, 1203 IB_RNR_TIMER_040_96 = 24, 1204 IB_RNR_TIMER_061_44 = 25, 1205 IB_RNR_TIMER_081_92 = 26, 1206 IB_RNR_TIMER_122_88 = 27, 1207 IB_RNR_TIMER_163_84 = 28, 1208 IB_RNR_TIMER_245_76 = 29, 1209 IB_RNR_TIMER_327_68 = 30, 1210 IB_RNR_TIMER_491_52 = 31 1211 }; 1212 1213 enum ib_qp_attr_mask { 1214 IB_QP_STATE = 1, 1215 IB_QP_CUR_STATE = (1<<1), 1216 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 1217 IB_QP_ACCESS_FLAGS = (1<<3), 1218 IB_QP_PKEY_INDEX = (1<<4), 1219 IB_QP_PORT = (1<<5), 1220 IB_QP_QKEY = (1<<6), 1221 IB_QP_AV = (1<<7), 1222 IB_QP_PATH_MTU = (1<<8), 1223 IB_QP_TIMEOUT = (1<<9), 1224 IB_QP_RETRY_CNT = (1<<10), 1225 IB_QP_RNR_RETRY = (1<<11), 1226 IB_QP_RQ_PSN = (1<<12), 1227 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 1228 IB_QP_ALT_PATH = (1<<14), 1229 IB_QP_MIN_RNR_TIMER = (1<<15), 1230 IB_QP_SQ_PSN = (1<<16), 1231 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 1232 IB_QP_PATH_MIG_STATE = (1<<18), 1233 IB_QP_CAP = (1<<19), 1234 IB_QP_DEST_QPN = (1<<20), 1235 IB_QP_RESERVED1 = (1<<21), 1236 IB_QP_RESERVED2 = (1<<22), 1237 IB_QP_RESERVED3 = (1<<23), 1238 IB_QP_RESERVED4 = (1<<24), 1239 IB_QP_RATE_LIMIT = (1<<25), 1240 }; 1241 1242 enum ib_qp_state { 1243 IB_QPS_RESET, 1244 IB_QPS_INIT, 1245 IB_QPS_RTR, 1246 IB_QPS_RTS, 1247 IB_QPS_SQD, 1248 IB_QPS_SQE, 1249 IB_QPS_ERR 1250 }; 1251 1252 enum ib_mig_state { 1253 IB_MIG_MIGRATED, 1254 IB_MIG_REARM, 1255 IB_MIG_ARMED 1256 }; 1257 1258 enum ib_mw_type { 1259 IB_MW_TYPE_1 = 1, 1260 IB_MW_TYPE_2 = 2 1261 }; 1262 1263 struct ib_qp_attr { 1264 enum ib_qp_state qp_state; 1265 enum ib_qp_state cur_qp_state; 1266 enum ib_mtu path_mtu; 1267 enum ib_mig_state path_mig_state; 1268 u32 qkey; 1269 u32 rq_psn; 1270 u32 sq_psn; 1271 u32 dest_qp_num; 1272 int qp_access_flags; 1273 struct ib_qp_cap cap; 1274 struct rdma_ah_attr ah_attr; 1275 struct rdma_ah_attr alt_ah_attr; 1276 u16 pkey_index; 1277 u16 alt_pkey_index; 1278 u8 en_sqd_async_notify; 1279 u8 sq_draining; 1280 u8 max_rd_atomic; 1281 u8 max_dest_rd_atomic; 1282 u8 min_rnr_timer; 1283 u8 port_num; 1284 u8 timeout; 1285 u8 retry_cnt; 1286 u8 rnr_retry; 1287 u8 alt_port_num; 1288 u8 alt_timeout; 1289 u32 rate_limit; 1290 }; 1291 1292 enum ib_wr_opcode { 1293 /* These are shared with userspace */ 1294 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE, 1295 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM, 1296 IB_WR_SEND = IB_UVERBS_WR_SEND, 1297 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM, 1298 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ, 1299 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP, 
1300 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD, 1301 IB_WR_LSO = IB_UVERBS_WR_TSO, 1302 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV, 1303 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV, 1304 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV, 1305 IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 1306 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP, 1307 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 1308 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1309 1310 /* These are kernel only and can not be issued by userspace */ 1311 IB_WR_REG_MR = 0x20, 1312 IB_WR_REG_SIG_MR, 1313 1314 /* reserve values for low level drivers' internal use. 1315 * These values will not be used at all in the ib core layer. 1316 */ 1317 IB_WR_RESERVED1 = 0xf0, 1318 IB_WR_RESERVED2, 1319 IB_WR_RESERVED3, 1320 IB_WR_RESERVED4, 1321 IB_WR_RESERVED5, 1322 IB_WR_RESERVED6, 1323 IB_WR_RESERVED7, 1324 IB_WR_RESERVED8, 1325 IB_WR_RESERVED9, 1326 IB_WR_RESERVED10, 1327 }; 1328 1329 enum ib_send_flags { 1330 IB_SEND_FENCE = 1, 1331 IB_SEND_SIGNALED = (1<<1), 1332 IB_SEND_SOLICITED = (1<<2), 1333 IB_SEND_INLINE = (1<<3), 1334 IB_SEND_IP_CSUM = (1<<4), 1335 1336 /* reserve bits 26-31 for low level drivers' internal use */ 1337 IB_SEND_RESERVED_START = (1 << 26), 1338 IB_SEND_RESERVED_END = (1 << 31), 1339 }; 1340 1341 struct ib_sge { 1342 u64 addr; 1343 u32 length; 1344 u32 lkey; 1345 }; 1346 1347 struct ib_cqe { 1348 void (*done)(struct ib_cq *cq, struct ib_wc *wc); 1349 }; 1350 1351 struct ib_send_wr { 1352 struct ib_send_wr *next; 1353 union { 1354 u64 wr_id; 1355 struct ib_cqe *wr_cqe; 1356 }; 1357 struct ib_sge *sg_list; 1358 int num_sge; 1359 enum ib_wr_opcode opcode; 1360 int send_flags; 1361 union { 1362 __be32 imm_data; 1363 u32 invalidate_rkey; 1364 } ex; 1365 }; 1366 1367 struct ib_rdma_wr { 1368 struct ib_send_wr wr; 1369 u64 remote_addr; 1370 u32 rkey; 1371 }; 1372 1373 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr) 1374 { 1375 return container_of(wr, struct ib_rdma_wr, wr); 1376 } 1377 1378 struct ib_atomic_wr { 1379 struct ib_send_wr wr; 1380 u64 remote_addr; 1381 u64 compare_add; 1382 u64 swap; 1383 u64 compare_add_mask; 1384 u64 swap_mask; 1385 u32 rkey; 1386 }; 1387 1388 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr) 1389 { 1390 return container_of(wr, struct ib_atomic_wr, wr); 1391 } 1392 1393 struct ib_ud_wr { 1394 struct ib_send_wr wr; 1395 struct ib_ah *ah; 1396 void *header; 1397 int hlen; 1398 int mss; 1399 u32 remote_qpn; 1400 u32 remote_qkey; 1401 u16 pkey_index; /* valid for GSI only */ 1402 u8 port_num; /* valid for DR SMPs on switch only */ 1403 }; 1404 1405 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr) 1406 { 1407 return container_of(wr, struct ib_ud_wr, wr); 1408 } 1409 1410 struct ib_reg_wr { 1411 struct ib_send_wr wr; 1412 struct ib_mr *mr; 1413 u32 key; 1414 int access; 1415 }; 1416 1417 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr) 1418 { 1419 return container_of(wr, struct ib_reg_wr, wr); 1420 } 1421 1422 struct ib_sig_handover_wr { 1423 struct ib_send_wr wr; 1424 struct ib_sig_attrs *sig_attrs; 1425 struct ib_mr *sig_mr; 1426 int access_flags; 1427 struct ib_sge *prot; 1428 }; 1429 1430 static inline const struct ib_sig_handover_wr * 1431 sig_handover_wr(const struct ib_send_wr *wr) 1432 { 1433 return container_of(wr, struct ib_sig_handover_wr, wr); 1434 } 1435 1436 struct ib_recv_wr { 1437 struct ib_recv_wr *next; 1438 union { 1439 u64 wr_id; 1440 struct ib_cqe *wr_cqe; 1441 }; 1442 
struct ib_sge *sg_list; 1443 int num_sge; 1444 }; 1445 1446 enum ib_access_flags { 1447 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE, 1448 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE, 1449 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ, 1450 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC, 1451 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND, 1452 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED, 1453 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND, 1454 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB, 1455 1456 IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1) 1457 }; 1458 1459 /* 1460 * XXX: these are apparently used for ->rereg_user_mr, no idea why they 1461 * are hidden here instead of a uapi header! 1462 */ 1463 enum ib_mr_rereg_flags { 1464 IB_MR_REREG_TRANS = 1, 1465 IB_MR_REREG_PD = (1<<1), 1466 IB_MR_REREG_ACCESS = (1<<2), 1467 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1468 }; 1469 1470 struct ib_fmr_attr { 1471 int max_pages; 1472 int max_maps; 1473 u8 page_shift; 1474 }; 1475 1476 struct ib_umem; 1477 1478 enum rdma_remove_reason { 1479 /* 1480 * Userspace requested uobject deletion or initial try 1481 * to remove uobject via cleanup. Call could fail 1482 */ 1483 RDMA_REMOVE_DESTROY, 1484 /* Context deletion. This call should delete the actual object itself */ 1485 RDMA_REMOVE_CLOSE, 1486 /* Driver is being hot-unplugged. This call should delete the actual object itself */ 1487 RDMA_REMOVE_DRIVER_REMOVE, 1488 /* uobj is being cleaned-up before being committed */ 1489 RDMA_REMOVE_ABORT, 1490 }; 1491 1492 struct ib_rdmacg_object { 1493 #ifdef CONFIG_CGROUP_RDMA 1494 struct rdma_cgroup *cg; /* owner rdma cgroup */ 1495 #endif 1496 }; 1497 1498 struct ib_ucontext { 1499 struct ib_device *device; 1500 struct ib_uverbs_file *ufile; 1501 /* 1502 * 'closing' can be read by the driver only during a destroy callback, 1503 * it is set when we are closing the file descriptor and indicates 1504 * that mm_sem may be locked. 
1505 */ 1506 bool closing; 1507 1508 bool cleanup_retryable; 1509 1510 void (*invalidate_range)(struct ib_umem_odp *umem_odp, 1511 unsigned long start, unsigned long end); 1512 struct mutex per_mm_list_lock; 1513 struct list_head per_mm_list; 1514 1515 struct ib_rdmacg_object cg_obj; 1516 /* 1517 * Implementation details of the RDMA core, don't use in drivers: 1518 */ 1519 struct rdma_restrack_entry res; 1520 }; 1521 1522 struct ib_uobject { 1523 u64 user_handle; /* handle given to us by userspace */ 1524 /* ufile & ucontext owning this object */ 1525 struct ib_uverbs_file *ufile; 1526 /* FIXME, save memory: ufile->context == context */ 1527 struct ib_ucontext *context; /* associated user context */ 1528 void *object; /* containing object */ 1529 struct list_head list; /* link to context's list */ 1530 struct ib_rdmacg_object cg_obj; /* rdmacg object */ 1531 int id; /* index into kernel idr */ 1532 struct kref ref; 1533 atomic_t usecnt; /* protects exclusive access */ 1534 struct rcu_head rcu; /* kfree_rcu() overhead */ 1535 1536 const struct uverbs_api_object *uapi_object; 1537 }; 1538 1539 struct ib_udata { 1540 const void __user *inbuf; 1541 void __user *outbuf; 1542 size_t inlen; 1543 size_t outlen; 1544 }; 1545 1546 struct ib_pd { 1547 u32 local_dma_lkey; 1548 u32 flags; 1549 struct ib_device *device; 1550 struct ib_uobject *uobject; 1551 atomic_t usecnt; /* count all resources */ 1552 1553 u32 unsafe_global_rkey; 1554 1555 /* 1556 * Implementation details of the RDMA core, don't use in drivers: 1557 */ 1558 struct ib_mr *__internal_mr; 1559 struct rdma_restrack_entry res; 1560 }; 1561 1562 struct ib_xrcd { 1563 struct ib_device *device; 1564 atomic_t usecnt; /* count all exposed resources */ 1565 struct inode *inode; 1566 1567 struct mutex tgt_qp_mutex; 1568 struct list_head tgt_qp_list; 1569 }; 1570 1571 struct ib_ah { 1572 struct ib_device *device; 1573 struct ib_pd *pd; 1574 struct ib_uobject *uobject; 1575 const struct ib_gid_attr *sgid_attr; 1576 enum rdma_ah_attr_type type; 1577 }; 1578 1579 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1580 1581 enum ib_poll_context { 1582 IB_POLL_DIRECT, /* caller context, no hw completions */ 1583 IB_POLL_SOFTIRQ, /* poll from softirq context */ 1584 IB_POLL_WORKQUEUE, /* poll from workqueue */ 1585 IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */ 1586 }; 1587 1588 struct ib_cq { 1589 struct ib_device *device; 1590 struct ib_uobject *uobject; 1591 ib_comp_handler comp_handler; 1592 void (*event_handler)(struct ib_event *, void *); 1593 void *cq_context; 1594 int cqe; 1595 atomic_t usecnt; /* count number of work queues */ 1596 enum ib_poll_context poll_ctx; 1597 struct ib_wc *wc; 1598 union { 1599 struct irq_poll iop; 1600 struct work_struct work; 1601 }; 1602 struct workqueue_struct *comp_wq; 1603 /* 1604 * Implementation details of the RDMA core, don't use in drivers: 1605 */ 1606 struct rdma_restrack_entry res; 1607 }; 1608 1609 struct ib_srq { 1610 struct ib_device *device; 1611 struct ib_pd *pd; 1612 struct ib_uobject *uobject; 1613 void (*event_handler)(struct ib_event *, void *); 1614 void *srq_context; 1615 enum ib_srq_type srq_type; 1616 atomic_t usecnt; 1617 1618 struct { 1619 struct ib_cq *cq; 1620 union { 1621 struct { 1622 struct ib_xrcd *xrcd; 1623 u32 srq_num; 1624 } xrc; 1625 }; 1626 } ext; 1627 }; 1628 1629 enum ib_raw_packet_caps { 1630 /* Strip cvlan from incoming packet and report it in the matching work 1631 * completion is supported. 
1632 */ 1633 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0), 1634 /* Scatter FCS field of an incoming packet to host memory is supported. 1635 */ 1636 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1), 1637 /* Checksum offloads are supported (for both send and receive). */ 1638 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2), 1639 /* When a packet is received for an RQ with no receive WQEs, the 1640 * packet processing is delayed. 1641 */ 1642 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3), 1643 }; 1644 1645 enum ib_wq_type { 1646 IB_WQT_RQ 1647 }; 1648 1649 enum ib_wq_state { 1650 IB_WQS_RESET, 1651 IB_WQS_RDY, 1652 IB_WQS_ERR 1653 }; 1654 1655 struct ib_wq { 1656 struct ib_device *device; 1657 struct ib_uobject *uobject; 1658 void *wq_context; 1659 void (*event_handler)(struct ib_event *, void *); 1660 struct ib_pd *pd; 1661 struct ib_cq *cq; 1662 u32 wq_num; 1663 enum ib_wq_state state; 1664 enum ib_wq_type wq_type; 1665 atomic_t usecnt; 1666 }; 1667 1668 enum ib_wq_flags { 1669 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, 1670 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, 1671 IB_WQ_FLAGS_DELAY_DROP = 1 << 2, 1672 IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3, 1673 }; 1674 1675 struct ib_wq_init_attr { 1676 void *wq_context; 1677 enum ib_wq_type wq_type; 1678 u32 max_wr; 1679 u32 max_sge; 1680 struct ib_cq *cq; 1681 void (*event_handler)(struct ib_event *, void *); 1682 u32 create_flags; /* Use enum ib_wq_flags */ 1683 }; 1684 1685 enum ib_wq_attr_mask { 1686 IB_WQ_STATE = 1 << 0, 1687 IB_WQ_CUR_STATE = 1 << 1, 1688 IB_WQ_FLAGS = 1 << 2, 1689 }; 1690 1691 struct ib_wq_attr { 1692 enum ib_wq_state wq_state; 1693 enum ib_wq_state curr_wq_state; 1694 u32 flags; /* Use enum ib_wq_flags */ 1695 u32 flags_mask; /* Use enum ib_wq_flags */ 1696 }; 1697 1698 struct ib_rwq_ind_table { 1699 struct ib_device *device; 1700 struct ib_uobject *uobject; 1701 atomic_t usecnt; 1702 u32 ind_tbl_num; 1703 u32 log_ind_tbl_size; 1704 struct ib_wq **ind_tbl; 1705 }; 1706 1707 struct ib_rwq_ind_table_init_attr { 1708 u32 log_ind_tbl_size; 1709 /* Each entry is a pointer to Receive Work Queue */ 1710 struct ib_wq **ind_tbl; 1711 }; 1712 1713 enum port_pkey_state { 1714 IB_PORT_PKEY_NOT_VALID = 0, 1715 IB_PORT_PKEY_VALID = 1, 1716 IB_PORT_PKEY_LISTED = 2, 1717 }; 1718 1719 struct ib_qp_security; 1720 1721 struct ib_port_pkey { 1722 enum port_pkey_state state; 1723 u16 pkey_index; 1724 u8 port_num; 1725 struct list_head qp_list; 1726 struct list_head to_error_list; 1727 struct ib_qp_security *sec; 1728 }; 1729 1730 struct ib_ports_pkeys { 1731 struct ib_port_pkey main; 1732 struct ib_port_pkey alt; 1733 }; 1734 1735 struct ib_qp_security { 1736 struct ib_qp *qp; 1737 struct ib_device *dev; 1738 /* Hold this mutex when changing port and pkey settings. */ 1739 struct mutex mutex; 1740 struct ib_ports_pkeys *ports_pkeys; 1741 /* A list of all open shared QP handles. Required to enforce security 1742 * properly for all users of a shared QP. 1743 */ 1744 struct list_head shared_qp_list; 1745 void *security; 1746 bool destroying; 1747 atomic_t error_list_count; 1748 struct completion error_complete; 1749 int error_comps_pending; 1750 }; 1751 1752 /* 1753 * @max_write_sge: Maximum SGE elements per RDMA WRITE request. 1754 * @max_read_sge: Maximum SGE elements per RDMA READ request. 
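 *
 * For example, a ULP building an RDMA WRITE on this QP with an ib_rdma_wr
 * "wr" (a hypothetical local variable) should keep
 *	wr.wr.num_sge <= qp->max_write_sge
 * and keep the SGE count of an RDMA READ within qp->max_read_sge; these
 * per-QP limits may be smaller than the device-wide limits reported in
 * struct ib_device_attr.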
1755 */ 1756 struct ib_qp { 1757 struct ib_device *device; 1758 struct ib_pd *pd; 1759 struct ib_cq *send_cq; 1760 struct ib_cq *recv_cq; 1761 spinlock_t mr_lock; 1762 int mrs_used; 1763 struct list_head rdma_mrs; 1764 struct list_head sig_mrs; 1765 struct ib_srq *srq; 1766 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1767 struct list_head xrcd_list; 1768 1769 /* count times opened, mcast attaches, flow attaches */ 1770 atomic_t usecnt; 1771 struct list_head open_list; 1772 struct ib_qp *real_qp; 1773 struct ib_uobject *uobject; 1774 void (*event_handler)(struct ib_event *, void *); 1775 void *qp_context; 1776 /* sgid_attrs associated with the AV's */ 1777 const struct ib_gid_attr *av_sgid_attr; 1778 const struct ib_gid_attr *alt_path_sgid_attr; 1779 u32 qp_num; 1780 u32 max_write_sge; 1781 u32 max_read_sge; 1782 enum ib_qp_type qp_type; 1783 struct ib_rwq_ind_table *rwq_ind_tbl; 1784 struct ib_qp_security *qp_sec; 1785 u8 port; 1786 1787 /* 1788 * Implementation details of the RDMA core, don't use in drivers: 1789 */ 1790 struct rdma_restrack_entry res; 1791 }; 1792 1793 struct ib_dm { 1794 struct ib_device *device; 1795 u32 length; 1796 u32 flags; 1797 struct ib_uobject *uobject; 1798 atomic_t usecnt; 1799 }; 1800 1801 struct ib_mr { 1802 struct ib_device *device; 1803 struct ib_pd *pd; 1804 u32 lkey; 1805 u32 rkey; 1806 u64 iova; 1807 u64 length; 1808 unsigned int page_size; 1809 bool need_inval; 1810 union { 1811 struct ib_uobject *uobject; /* user */ 1812 struct list_head qp_entry; /* FR */ 1813 }; 1814 1815 struct ib_dm *dm; 1816 1817 /* 1818 * Implementation details of the RDMA core, don't use in drivers: 1819 */ 1820 struct rdma_restrack_entry res; 1821 }; 1822 1823 struct ib_mw { 1824 struct ib_device *device; 1825 struct ib_pd *pd; 1826 struct ib_uobject *uobject; 1827 u32 rkey; 1828 enum ib_mw_type type; 1829 }; 1830 1831 struct ib_fmr { 1832 struct ib_device *device; 1833 struct ib_pd *pd; 1834 struct list_head list; 1835 u32 lkey; 1836 u32 rkey; 1837 }; 1838 1839 /* Supported steering options */ 1840 enum ib_flow_attr_type { 1841 /* steering according to rule specifications */ 1842 IB_FLOW_ATTR_NORMAL = 0x0, 1843 /* default unicast and multicast rule - 1844 * receive all Eth traffic which isn't steered to any QP 1845 */ 1846 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1847 /* default multicast rule - 1848 * receive all Eth multicast traffic which isn't steered to any QP 1849 */ 1850 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1851 /* sniffer rule - receive all port traffic */ 1852 IB_FLOW_ATTR_SNIFFER = 0x3 1853 }; 1854 1855 /* Supported steering header types */ 1856 enum ib_flow_spec_type { 1857 /* L2 headers*/ 1858 IB_FLOW_SPEC_ETH = 0x20, 1859 IB_FLOW_SPEC_IB = 0x22, 1860 /* L3 header*/ 1861 IB_FLOW_SPEC_IPV4 = 0x30, 1862 IB_FLOW_SPEC_IPV6 = 0x31, 1863 IB_FLOW_SPEC_ESP = 0x34, 1864 /* L4 headers*/ 1865 IB_FLOW_SPEC_TCP = 0x40, 1866 IB_FLOW_SPEC_UDP = 0x41, 1867 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, 1868 IB_FLOW_SPEC_GRE = 0x51, 1869 IB_FLOW_SPEC_MPLS = 0x60, 1870 IB_FLOW_SPEC_INNER = 0x100, 1871 /* Actions */ 1872 IB_FLOW_SPEC_ACTION_TAG = 0x1000, 1873 IB_FLOW_SPEC_ACTION_DROP = 0x1001, 1874 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002, 1875 IB_FLOW_SPEC_ACTION_COUNT = 0x1003, 1876 }; 1877 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1878 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10 1879 1880 /* Flow steering rule priority is set according to it's domain. 1881 * Lower domain value means higher priority. 
1882 */ 1883 enum ib_flow_domain { 1884 IB_FLOW_DOMAIN_USER, 1885 IB_FLOW_DOMAIN_ETHTOOL, 1886 IB_FLOW_DOMAIN_RFS, 1887 IB_FLOW_DOMAIN_NIC, 1888 IB_FLOW_DOMAIN_NUM /* Must be last */ 1889 }; 1890 1891 enum ib_flow_flags { 1892 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ 1893 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */ 1894 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */ 1895 }; 1896 1897 struct ib_flow_eth_filter { 1898 u8 dst_mac[6]; 1899 u8 src_mac[6]; 1900 __be16 ether_type; 1901 __be16 vlan_tag; 1902 /* Must be last */ 1903 u8 real_sz[0]; 1904 }; 1905 1906 struct ib_flow_spec_eth { 1907 u32 type; 1908 u16 size; 1909 struct ib_flow_eth_filter val; 1910 struct ib_flow_eth_filter mask; 1911 }; 1912 1913 struct ib_flow_ib_filter { 1914 __be16 dlid; 1915 __u8 sl; 1916 /* Must be last */ 1917 u8 real_sz[0]; 1918 }; 1919 1920 struct ib_flow_spec_ib { 1921 u32 type; 1922 u16 size; 1923 struct ib_flow_ib_filter val; 1924 struct ib_flow_ib_filter mask; 1925 }; 1926 1927 /* IPv4 header flags */ 1928 enum ib_ipv4_flags { 1929 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */ 1930 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the 1931 last have this flag set */ 1932 }; 1933 1934 struct ib_flow_ipv4_filter { 1935 __be32 src_ip; 1936 __be32 dst_ip; 1937 u8 proto; 1938 u8 tos; 1939 u8 ttl; 1940 u8 flags; 1941 /* Must be last */ 1942 u8 real_sz[0]; 1943 }; 1944 1945 struct ib_flow_spec_ipv4 { 1946 u32 type; 1947 u16 size; 1948 struct ib_flow_ipv4_filter val; 1949 struct ib_flow_ipv4_filter mask; 1950 }; 1951 1952 struct ib_flow_ipv6_filter { 1953 u8 src_ip[16]; 1954 u8 dst_ip[16]; 1955 __be32 flow_label; 1956 u8 next_hdr; 1957 u8 traffic_class; 1958 u8 hop_limit; 1959 /* Must be last */ 1960 u8 real_sz[0]; 1961 }; 1962 1963 struct ib_flow_spec_ipv6 { 1964 u32 type; 1965 u16 size; 1966 struct ib_flow_ipv6_filter val; 1967 struct ib_flow_ipv6_filter mask; 1968 }; 1969 1970 struct ib_flow_tcp_udp_filter { 1971 __be16 dst_port; 1972 __be16 src_port; 1973 /* Must be last */ 1974 u8 real_sz[0]; 1975 }; 1976 1977 struct ib_flow_spec_tcp_udp { 1978 u32 type; 1979 u16 size; 1980 struct ib_flow_tcp_udp_filter val; 1981 struct ib_flow_tcp_udp_filter mask; 1982 }; 1983 1984 struct ib_flow_tunnel_filter { 1985 __be32 tunnel_id; 1986 u8 real_sz[0]; 1987 }; 1988 1989 /* ib_flow_spec_tunnel describes the Vxlan tunnel 1990 * the tunnel_id from val has the vni value 1991 */ 1992 struct ib_flow_spec_tunnel { 1993 u32 type; 1994 u16 size; 1995 struct ib_flow_tunnel_filter val; 1996 struct ib_flow_tunnel_filter mask; 1997 }; 1998 1999 struct ib_flow_esp_filter { 2000 __be32 spi; 2001 __be32 seq; 2002 /* Must be last */ 2003 u8 real_sz[0]; 2004 }; 2005 2006 struct ib_flow_spec_esp { 2007 u32 type; 2008 u16 size; 2009 struct ib_flow_esp_filter val; 2010 struct ib_flow_esp_filter mask; 2011 }; 2012 2013 struct ib_flow_gre_filter { 2014 __be16 c_ks_res0_ver; 2015 __be16 protocol; 2016 __be32 key; 2017 /* Must be last */ 2018 u8 real_sz[0]; 2019 }; 2020 2021 struct ib_flow_spec_gre { 2022 u32 type; 2023 u16 size; 2024 struct ib_flow_gre_filter val; 2025 struct ib_flow_gre_filter mask; 2026 }; 2027 2028 struct ib_flow_mpls_filter { 2029 __be32 tag; 2030 /* Must be last */ 2031 u8 real_sz[0]; 2032 }; 2033 2034 struct ib_flow_spec_mpls { 2035 u32 type; 2036 u16 size; 2037 struct ib_flow_mpls_filter val; 2038 struct ib_flow_mpls_filter mask; 2039 }; 2040 2041 struct ib_flow_spec_action_tag { 2042 enum ib_flow_spec_type type; 2043 u16 size; 2044 u32 
tag_id; 2045 }; 2046 2047 struct ib_flow_spec_action_drop { 2048 enum ib_flow_spec_type type; 2049 u16 size; 2050 }; 2051 2052 struct ib_flow_spec_action_handle { 2053 enum ib_flow_spec_type type; 2054 u16 size; 2055 struct ib_flow_action *act; 2056 }; 2057 2058 enum ib_counters_description { 2059 IB_COUNTER_PACKETS, 2060 IB_COUNTER_BYTES, 2061 }; 2062 2063 struct ib_flow_spec_action_count { 2064 enum ib_flow_spec_type type; 2065 u16 size; 2066 struct ib_counters *counters; 2067 }; 2068 2069 union ib_flow_spec { 2070 struct { 2071 u32 type; 2072 u16 size; 2073 }; 2074 struct ib_flow_spec_eth eth; 2075 struct ib_flow_spec_ib ib; 2076 struct ib_flow_spec_ipv4 ipv4; 2077 struct ib_flow_spec_tcp_udp tcp_udp; 2078 struct ib_flow_spec_ipv6 ipv6; 2079 struct ib_flow_spec_tunnel tunnel; 2080 struct ib_flow_spec_esp esp; 2081 struct ib_flow_spec_gre gre; 2082 struct ib_flow_spec_mpls mpls; 2083 struct ib_flow_spec_action_tag flow_tag; 2084 struct ib_flow_spec_action_drop drop; 2085 struct ib_flow_spec_action_handle action; 2086 struct ib_flow_spec_action_count flow_count; 2087 }; 2088 2089 struct ib_flow_attr { 2090 enum ib_flow_attr_type type; 2091 u16 size; 2092 u16 priority; 2093 u32 flags; 2094 u8 num_of_specs; 2095 u8 port; 2096 union ib_flow_spec flows[]; 2097 }; 2098 2099 struct ib_flow { 2100 struct ib_qp *qp; 2101 struct ib_device *device; 2102 struct ib_uobject *uobject; 2103 }; 2104 2105 enum ib_flow_action_type { 2106 IB_FLOW_ACTION_UNSPECIFIED, 2107 IB_FLOW_ACTION_ESP = 1, 2108 }; 2109 2110 struct ib_flow_action_attrs_esp_keymats { 2111 enum ib_uverbs_flow_action_esp_keymat protocol; 2112 union { 2113 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; 2114 } keymat; 2115 }; 2116 2117 struct ib_flow_action_attrs_esp_replays { 2118 enum ib_uverbs_flow_action_esp_replay protocol; 2119 union { 2120 struct ib_uverbs_flow_action_esp_replay_bmp bmp; 2121 } replay; 2122 }; 2123 2124 enum ib_flow_action_attrs_esp_flags { 2125 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags 2126 * This is done in order to share the same flags between user-space and 2127 * kernel and spare an unnecessary translation. 2128 */ 2129 2130 /* Kernel flags */ 2131 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, 2132 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, 2133 }; 2134 2135 struct ib_flow_spec_list { 2136 struct ib_flow_spec_list *next; 2137 union ib_flow_spec spec; 2138 }; 2139 2140 struct ib_flow_action_attrs_esp { 2141 struct ib_flow_action_attrs_esp_keymats *keymat; 2142 struct ib_flow_action_attrs_esp_replays *replay; 2143 struct ib_flow_spec_list *encap; 2144 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. 2145 * Value of 0 is a valid value. 
2146 */ 2147 u32 esn; 2148 u32 spi; 2149 u32 seq; 2150 u32 tfc_pad; 2151 /* Use enum ib_flow_action_attrs_esp_flags */ 2152 u64 flags; 2153 u64 hard_limit_pkts; 2154 }; 2155 2156 struct ib_flow_action { 2157 struct ib_device *device; 2158 struct ib_uobject *uobject; 2159 enum ib_flow_action_type type; 2160 atomic_t usecnt; 2161 }; 2162 2163 struct ib_mad_hdr; 2164 struct ib_grh; 2165 2166 enum ib_process_mad_flags { 2167 IB_MAD_IGNORE_MKEY = 1, 2168 IB_MAD_IGNORE_BKEY = 2, 2169 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 2170 }; 2171 2172 enum ib_mad_result { 2173 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 2174 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 2175 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 2176 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 2177 }; 2178 2179 struct ib_port_cache { 2180 u64 subnet_prefix; 2181 struct ib_pkey_cache *pkey; 2182 struct ib_gid_table *gid; 2183 u8 lmc; 2184 enum ib_port_state port_state; 2185 }; 2186 2187 struct ib_cache { 2188 rwlock_t lock; 2189 struct ib_event_handler event_handler; 2190 }; 2191 2192 struct iw_cm_verbs; 2193 2194 struct ib_port_immutable { 2195 int pkey_tbl_len; 2196 int gid_tbl_len; 2197 u32 core_cap_flags; 2198 u32 max_mad_size; 2199 }; 2200 2201 struct ib_port_data { 2202 struct ib_device *ib_dev; 2203 2204 struct ib_port_immutable immutable; 2205 2206 spinlock_t pkey_list_lock; 2207 struct list_head pkey_list; 2208 2209 struct ib_port_cache cache; 2210 2211 spinlock_t netdev_lock; 2212 struct net_device __rcu *netdev; 2213 struct hlist_node ndev_hash_link; 2214 }; 2215 2216 /* rdma netdev type - specifies protocol type */ 2217 enum rdma_netdev_t { 2218 RDMA_NETDEV_OPA_VNIC, 2219 RDMA_NETDEV_IPOIB, 2220 }; 2221 2222 /** 2223 * struct rdma_netdev - rdma netdev 2224 * For cases where netstack interfacing is required. 2225 */ 2226 struct rdma_netdev { 2227 void *clnt_priv; 2228 struct ib_device *hca; 2229 u8 port_num; 2230 2231 /* 2232 * cleanup function must be specified. 2233 * FIXME: This is only used for OPA_VNIC and that usage should be 2234 * removed too. 
2235 */ 2236 void (*free_rdma_netdev)(struct net_device *netdev); 2237 2238 /* control functions */ 2239 void (*set_id)(struct net_device *netdev, int id); 2240 /* send packet */ 2241 int (*send)(struct net_device *dev, struct sk_buff *skb, 2242 struct ib_ah *address, u32 dqpn); 2243 /* multicast */ 2244 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca, 2245 union ib_gid *gid, u16 mlid, 2246 int set_qkey, u32 qkey); 2247 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca, 2248 union ib_gid *gid, u16 mlid); 2249 }; 2250 2251 struct rdma_netdev_alloc_params { 2252 size_t sizeof_priv; 2253 unsigned int txqs; 2254 unsigned int rxqs; 2255 void *param; 2256 2257 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num, 2258 struct net_device *netdev, void *param); 2259 }; 2260 2261 struct ib_counters { 2262 struct ib_device *device; 2263 struct ib_uobject *uobject; 2264 /* num of objects attached */ 2265 atomic_t usecnt; 2266 }; 2267 2268 struct ib_counters_read_attr { 2269 u64 *counters_buff; 2270 u32 ncounters; 2271 u32 flags; /* use enum ib_read_counters_flags */ 2272 }; 2273 2274 struct uverbs_attr_bundle; 2275 2276 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \ 2277 .size_##ib_struct = \ 2278 (sizeof(struct drv_struct) + \ 2279 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \ 2280 BUILD_BUG_ON_ZERO( \ 2281 !__same_type(((struct drv_struct *)NULL)->member, \ 2282 struct ib_struct))) 2283 2284 #define rdma_zalloc_drv_obj(ib_dev, ib_type) \ 2285 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, GFP_KERNEL)) 2286 2287 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct 2288 2289 /** 2290 * struct ib_device_ops - InfiniBand device operations 2291 * This structure defines all the InfiniBand device operations, providers will 2292 * need to define the supported operations, otherwise they will be set to null. 
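 *
 * As a minimal illustration (not taken from any real driver; the "mydrv"
 * names below are hypothetical), a provider typically builds a constant
 * ops table and registers it with ib_set_device_ops():
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.query_device	= mydrv_query_device,
 *		.query_port	= mydrv_query_port,
 *		.alloc_pd	= mydrv_alloc_pd,
 *		.dealloc_pd	= mydrv_dealloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 *
 *	ib_set_device_ops(ibdev, &mydrv_dev_ops);
 *
 * Any entry the provider leaves unset stays NULL and the corresponding
 * verb is treated as unsupported by the core.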
2293 */ 2294 struct ib_device_ops { 2295 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr, 2296 const struct ib_send_wr **bad_send_wr); 2297 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, 2298 const struct ib_recv_wr **bad_recv_wr); 2299 void (*drain_rq)(struct ib_qp *qp); 2300 void (*drain_sq)(struct ib_qp *qp); 2301 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc); 2302 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 2303 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags); 2304 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt); 2305 int (*post_srq_recv)(struct ib_srq *srq, 2306 const struct ib_recv_wr *recv_wr, 2307 const struct ib_recv_wr **bad_recv_wr); 2308 int (*process_mad)(struct ib_device *device, int process_mad_flags, 2309 u8 port_num, const struct ib_wc *in_wc, 2310 const struct ib_grh *in_grh, 2311 const struct ib_mad_hdr *in_mad, size_t in_mad_size, 2312 struct ib_mad_hdr *out_mad, size_t *out_mad_size, 2313 u16 *out_mad_pkey_index); 2314 int (*query_device)(struct ib_device *device, 2315 struct ib_device_attr *device_attr, 2316 struct ib_udata *udata); 2317 int (*modify_device)(struct ib_device *device, int device_modify_mask, 2318 struct ib_device_modify *device_modify); 2319 void (*get_dev_fw_str)(struct ib_device *device, char *str); 2320 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, 2321 int comp_vector); 2322 int (*query_port)(struct ib_device *device, u8 port_num, 2323 struct ib_port_attr *port_attr); 2324 int (*modify_port)(struct ib_device *device, u8 port_num, 2325 int port_modify_mask, 2326 struct ib_port_modify *port_modify); 2327 /** 2328 * The following mandatory functions are used only at device 2329 * registration. Keep functions such as these at the end of this 2330 * structure to avoid cache line misses when accessing struct ib_device 2331 * in fast paths. 2332 */ 2333 int (*get_port_immutable)(struct ib_device *device, u8 port_num, 2334 struct ib_port_immutable *immutable); 2335 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 2336 u8 port_num); 2337 /** 2338 * When calling get_netdev, the HW vendor's driver should return the 2339 * net device of device @device at port @port_num or NULL if such 2340 * a net device doesn't exist. The vendor driver should call dev_hold 2341 * on this net device. The HW vendor's device driver must guarantee 2342 * that this function returns NULL before the net device has finished 2343 * NETDEV_UNREGISTER state. 2344 */ 2345 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num); 2346 /** 2347 * rdma netdev operation 2348 * 2349 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params 2350 * must return -EOPNOTSUPP if it doesn't support the specified type. 2351 */ 2352 struct net_device *(*alloc_rdma_netdev)( 2353 struct ib_device *device, u8 port_num, enum rdma_netdev_t type, 2354 const char *name, unsigned char name_assign_type, 2355 void (*setup)(struct net_device *)); 2356 2357 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num, 2358 enum rdma_netdev_t type, 2359 struct rdma_netdev_alloc_params *params); 2360 /** 2361 * query_gid should be return GID value for @device, when @port_num 2362 * link layer is either IB or iWarp. It is no-op if @port_num port 2363 * is RoCE link layer. 
2364 */ 2365 int (*query_gid)(struct ib_device *device, u8 port_num, int index, 2366 union ib_gid *gid); 2367 /** 2368 * When calling add_gid, the HW vendor's driver should add the gid 2369 * of device of port at gid index available at @attr. Meta-info of 2370 * that gid (for example, the network device related to this gid) is 2371 * available at @attr. @context allows the HW vendor driver to store 2372 * extra information together with a GID entry. The HW vendor driver may 2373 * allocate memory to contain this information and store it in @context 2374 * when a new GID entry is written to. Params are consistent until the 2375 * next call of add_gid or delete_gid. The function should return 0 on 2376 * success or error otherwise. The function could be called 2377 * concurrently for different ports. This function is only called when 2378 * roce_gid_table is used. 2379 */ 2380 int (*add_gid)(const struct ib_gid_attr *attr, void **context); 2381 /** 2382 * When calling del_gid, the HW vendor's driver should delete the 2383 * gid of device @device at gid index gid_index of port port_num 2384 * available in @attr. 2385 * Upon the deletion of a GID entry, the HW vendor must free any 2386 * allocated memory. The caller will clear @context afterwards. 2387 * This function is only called when roce_gid_table is used. 2388 */ 2389 int (*del_gid)(const struct ib_gid_attr *attr, void **context); 2390 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index, 2391 u16 *pkey); 2392 int (*alloc_ucontext)(struct ib_ucontext *context, 2393 struct ib_udata *udata); 2394 void (*dealloc_ucontext)(struct ib_ucontext *context); 2395 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); 2396 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2397 int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context, 2398 struct ib_udata *udata); 2399 void (*dealloc_pd)(struct ib_pd *pd); 2400 struct ib_ah *(*create_ah)(struct ib_pd *pd, 2401 struct rdma_ah_attr *ah_attr, u32 flags, 2402 struct ib_udata *udata); 2403 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2404 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2405 int (*destroy_ah)(struct ib_ah *ah, u32 flags); 2406 struct ib_srq *(*create_srq)(struct ib_pd *pd, 2407 struct ib_srq_init_attr *srq_init_attr, 2408 struct ib_udata *udata); 2409 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr, 2410 enum ib_srq_attr_mask srq_attr_mask, 2411 struct ib_udata *udata); 2412 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); 2413 int (*destroy_srq)(struct ib_srq *srq); 2414 struct ib_qp *(*create_qp)(struct ib_pd *pd, 2415 struct ib_qp_init_attr *qp_init_attr, 2416 struct ib_udata *udata); 2417 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2418 int qp_attr_mask, struct ib_udata *udata); 2419 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2420 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); 2421 int (*destroy_qp)(struct ib_qp *qp); 2422 struct ib_cq *(*create_cq)(struct ib_device *device, 2423 const struct ib_cq_init_attr *attr, 2424 struct ib_ucontext *context, 2425 struct ib_udata *udata); 2426 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); 2427 int (*destroy_cq)(struct ib_cq *cq); 2428 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); 2429 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); 2430 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, 2431 u64 
virt_addr, int mr_access_flags, 2432 struct ib_udata *udata); 2433 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length, 2434 u64 virt_addr, int mr_access_flags, 2435 struct ib_pd *pd, struct ib_udata *udata); 2436 int (*dereg_mr)(struct ib_mr *mr); 2437 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, 2438 u32 max_num_sg); 2439 int (*advise_mr)(struct ib_pd *pd, 2440 enum ib_uverbs_advise_mr_advice advice, u32 flags, 2441 struct ib_sge *sg_list, u32 num_sge, 2442 struct uverbs_attr_bundle *attrs); 2443 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 2444 unsigned int *sg_offset); 2445 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2446 struct ib_mr_status *mr_status); 2447 struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type, 2448 struct ib_udata *udata); 2449 int (*dealloc_mw)(struct ib_mw *mw); 2450 struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags, 2451 struct ib_fmr_attr *fmr_attr); 2452 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len, 2453 u64 iova); 2454 int (*unmap_fmr)(struct list_head *fmr_list); 2455 int (*dealloc_fmr)(struct ib_fmr *fmr); 2456 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2457 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2458 struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device, 2459 struct ib_ucontext *ucontext, 2460 struct ib_udata *udata); 2461 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 2462 struct ib_flow *(*create_flow)(struct ib_qp *qp, 2463 struct ib_flow_attr *flow_attr, 2464 int domain, struct ib_udata *udata); 2465 int (*destroy_flow)(struct ib_flow *flow_id); 2466 struct ib_flow_action *(*create_flow_action_esp)( 2467 struct ib_device *device, 2468 const struct ib_flow_action_attrs_esp *attr, 2469 struct uverbs_attr_bundle *attrs); 2470 int (*destroy_flow_action)(struct ib_flow_action *action); 2471 int (*modify_flow_action_esp)( 2472 struct ib_flow_action *action, 2473 const struct ib_flow_action_attrs_esp *attr, 2474 struct uverbs_attr_bundle *attrs); 2475 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2476 int state); 2477 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2478 struct ifla_vf_info *ivf); 2479 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2480 struct ifla_vf_stats *stats); 2481 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2482 int type); 2483 struct ib_wq *(*create_wq)(struct ib_pd *pd, 2484 struct ib_wq_init_attr *init_attr, 2485 struct ib_udata *udata); 2486 int (*destroy_wq)(struct ib_wq *wq); 2487 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, 2488 u32 wq_attr_mask, struct ib_udata *udata); 2489 struct ib_rwq_ind_table *(*create_rwq_ind_table)( 2490 struct ib_device *device, 2491 struct ib_rwq_ind_table_init_attr *init_attr, 2492 struct ib_udata *udata); 2493 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2494 struct ib_dm *(*alloc_dm)(struct ib_device *device, 2495 struct ib_ucontext *context, 2496 struct ib_dm_alloc_attr *attr, 2497 struct uverbs_attr_bundle *attrs); 2498 int (*dealloc_dm)(struct ib_dm *dm); 2499 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, 2500 struct ib_dm_mr_attr *attr, 2501 struct uverbs_attr_bundle *attrs); 2502 struct ib_counters *(*create_counters)( 2503 struct ib_device *device, struct uverbs_attr_bundle *attrs); 2504 int (*destroy_counters)(struct ib_counters *counters); 2505 int (*read_counters)(struct 
ib_counters *counters, 2506 struct ib_counters_read_attr *counters_read_attr, 2507 struct uverbs_attr_bundle *attrs); 2508 /** 2509 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 2510 * driver initialized data. The struct is kfree()'ed by the sysfs 2511 * core when the device is removed. A lifespan of -1 in the return 2512 * struct tells the core to set a default lifespan. 2513 */ 2514 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 2515 u8 port_num); 2516 /** 2517 * get_hw_stats - Fill in the counter value(s) in the stats struct. 2518 * @index - The index in the value array we wish to have updated, or 2519 * num_counters if we want all stats updated 2520 * Return codes - 2521 * < 0 - Error, no counters updated 2522 * index - Updated the single counter pointed to by index 2523 * num_counters - Updated all counters (will reset the timestamp 2524 * and prevent further calls for lifespan milliseconds) 2525 * Drivers are allowed to update all counters in lieu of just the 2526 * one given in index at their option 2527 */ 2528 int (*get_hw_stats)(struct ib_device *device, 2529 struct rdma_hw_stats *stats, u8 port, int index); 2530 /* 2531 * This function is called once for each port when an ib device is 2532 * registered. 2533 */ 2534 int (*init_port)(struct ib_device *device, u8 port_num, 2535 struct kobject *port_sysfs); 2536 /** 2537 * Allows rdma drivers to add their own restrack attributes. 2538 */ 2539 int (*fill_res_entry)(struct sk_buff *msg, 2540 struct rdma_restrack_entry *entry); 2541 2542 /* Device lifecycle callbacks */ 2543 /* 2544 * Called after the device becomes registered, before clients are 2545 * attached 2546 */ 2547 int (*enable_driver)(struct ib_device *dev); 2548 /* 2549 * This is called as part of ib_dealloc_device(). 2550 */ 2551 void (*dealloc_driver)(struct ib_device *dev); 2552 2553 DECLARE_RDMA_OBJ_SIZE(ib_pd); 2554 DECLARE_RDMA_OBJ_SIZE(ib_ucontext); 2555 }; 2556 2557 struct rdma_restrack_root; 2558 2559 struct ib_device { 2560 /* Do not access @dma_device directly from ULP nor from HW drivers. */ 2561 struct device *dma_device; 2562 struct ib_device_ops ops; 2563 char name[IB_DEVICE_NAME_MAX]; 2564 struct rcu_head rcu_head; 2565 2566 struct list_head event_handler_list; 2567 spinlock_t event_handler_lock; 2568 2569 struct rw_semaphore client_data_rwsem; 2570 struct xarray client_data; 2571 struct mutex unregistration_lock; 2572 2573 struct ib_cache cache; 2574 /** 2575 * port_data is indexed by port number 2576 */ 2577 struct ib_port_data *port_data; 2578 2579 int num_comp_vectors; 2580 2581 struct iw_cm_verbs *iwcm; 2582 2583 struct module *owner; 2584 struct device dev; 2585 /* First group for device attributes, 2586 * Second group for driver provided attributes (optional). 2587 * It is a NULL-terminated array.
2588 */ 2589 const struct attribute_group *groups[3]; 2590 2591 struct kobject *ports_kobj; 2592 struct list_head port_list; 2593 2594 int uverbs_abi_ver; 2595 u64 uverbs_cmd_mask; 2596 u64 uverbs_ex_cmd_mask; 2597 2598 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2599 __be64 node_guid; 2600 u32 local_dma_lkey; 2601 u16 is_switch:1; 2602 /* Indicates kernel verbs support, should not be used in drivers */ 2603 u16 kverbs_provider:1; 2604 u8 node_type; 2605 u8 phys_port_cnt; 2606 struct ib_device_attr attrs; 2607 struct attribute_group *hw_stats_ag; 2608 struct rdma_hw_stats *hw_stats; 2609 2610 #ifdef CONFIG_CGROUP_RDMA 2611 struct rdmacg_device cg_device; 2612 #endif 2613 2614 u32 index; 2615 struct rdma_restrack_root *res; 2616 2617 const struct uapi_definition *driver_def; 2618 enum rdma_driver_id driver_id; 2619 2620 /* 2621 * Positive refcount indicates that the device is currently 2622 * registered and cannot be unregistered. 2623 */ 2624 refcount_t refcount; 2625 struct completion unreg_completion; 2626 struct work_struct unregistration_work; 2627 2628 const struct rdma_link_ops *link_ops; 2629 }; 2630 2631 struct ib_client { 2632 const char *name; 2633 void (*add) (struct ib_device *); 2634 void (*remove)(struct ib_device *, void *client_data); 2635 2636 /* Returns the net_dev belonging to this ib_client and matching the 2637 * given parameters. 2638 * @dev: An RDMA device that the net_dev use for communication. 2639 * @port: A physical port number on the RDMA device. 2640 * @pkey: P_Key that the net_dev uses if applicable. 2641 * @gid: A GID that the net_dev uses to communicate. 2642 * @addr: An IP address the net_dev is configured with. 2643 * @client_data: The device's client data set by ib_set_client_data(). 2644 * 2645 * An ib_client that implements a net_dev on top of RDMA devices 2646 * (such as IP over IB) should implement this callback, allowing the 2647 * rdma_cm module to find the right net_dev for a given request. 2648 * 2649 * The caller is responsible for calling dev_put on the returned 2650 * netdev. */ 2651 struct net_device *(*get_net_dev_by_params)( 2652 struct ib_device *dev, 2653 u8 port, 2654 u16 pkey, 2655 const union ib_gid *gid, 2656 const struct sockaddr *addr, 2657 void *client_data); 2658 struct list_head list; 2659 u32 client_id; 2660 2661 /* kverbs are not required by the client */ 2662 u8 no_kverbs_req:1; 2663 }; 2664 2665 struct ib_device *_ib_alloc_device(size_t size); 2666 #define ib_alloc_device(drv_struct, member) \ 2667 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \ 2668 BUILD_BUG_ON_ZERO(offsetof( \ 2669 struct drv_struct, member))), \ 2670 struct drv_struct, member) 2671 2672 void ib_dealloc_device(struct ib_device *device); 2673 2674 void ib_get_device_fw_str(struct ib_device *device, char *str); 2675 2676 int ib_register_device(struct ib_device *device, const char *name); 2677 void ib_unregister_device(struct ib_device *device); 2678 void ib_unregister_driver(enum rdma_driver_id driver_id); 2679 void ib_unregister_device_and_put(struct ib_device *device); 2680 void ib_unregister_device_queued(struct ib_device *ib_dev); 2681 2682 int ib_register_client (struct ib_client *client); 2683 void ib_unregister_client(struct ib_client *client); 2684 2685 /** 2686 * ib_get_client_data - Get IB client context 2687 * @device:Device to get context for 2688 * @client:Client to get context for 2689 * 2690 * ib_get_client_data() returns the client context data set with 2691 * ib_set_client_data(). 
This can only be called while the client is 2692 * registered to the device, once the ib_client remove() callback returns this 2693 * cannot be called. 2694 */ 2695 static inline void *ib_get_client_data(struct ib_device *device, 2696 struct ib_client *client) 2697 { 2698 return xa_load(&device->client_data, client->client_id); 2699 } 2700 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2701 void *data); 2702 void ib_set_device_ops(struct ib_device *device, 2703 const struct ib_device_ops *ops); 2704 2705 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) 2706 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, 2707 unsigned long pfn, unsigned long size, pgprot_t prot); 2708 int rdma_user_mmap_page(struct ib_ucontext *ucontext, 2709 struct vm_area_struct *vma, struct page *page, 2710 unsigned long size); 2711 #else 2712 static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext, 2713 struct vm_area_struct *vma, 2714 unsigned long pfn, unsigned long size, 2715 pgprot_t prot) 2716 { 2717 return -EINVAL; 2718 } 2719 static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext, 2720 struct vm_area_struct *vma, struct page *page, 2721 unsigned long size) 2722 { 2723 return -EINVAL; 2724 } 2725 #endif 2726 2727 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2728 { 2729 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2730 } 2731 2732 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2733 { 2734 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2735 } 2736 2737 static inline bool ib_is_buffer_cleared(const void __user *p, 2738 size_t len) 2739 { 2740 bool ret; 2741 u8 *buf; 2742 2743 if (len > USHRT_MAX) 2744 return false; 2745 2746 buf = memdup_user(p, len); 2747 if (IS_ERR(buf)) 2748 return false; 2749 2750 ret = !memchr_inv(buf, 0, len); 2751 kfree(buf); 2752 return ret; 2753 } 2754 2755 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2756 size_t offset, 2757 size_t len) 2758 { 2759 return ib_is_buffer_cleared(udata->inbuf + offset, len); 2760 } 2761 2762 /** 2763 * ib_is_destroy_retryable - Check whether the uobject destruction 2764 * is retryable. 2765 * @ret: The initial destruction return code 2766 * @why: remove reason 2767 * @uobj: The uobject that is destroyed 2768 * 2769 * This function is a helper function that IB layer and low-level drivers 2770 * can use to consider whether the destruction of the given uobject is 2771 * retry-able. 2772 * It checks the original return code, if it wasn't success the destruction 2773 * is retryable according to the ucontext state (i.e. cleanup_retryable) and 2774 * the remove reason. (i.e. why). 2775 * Must be called with the object locked for destroy. 2776 */ 2777 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why, 2778 struct ib_uobject *uobj) 2779 { 2780 return ret && (why == RDMA_REMOVE_DESTROY || 2781 uobj->context->cleanup_retryable); 2782 } 2783 2784 /** 2785 * ib_destroy_usecnt - Called during destruction to check the usecnt 2786 * @usecnt: The usecnt atomic 2787 * @why: remove reason 2788 * @uobj: The uobject that is destroyed 2789 * 2790 * Non-zero usecnts will block destruction unless destruction was triggered by 2791 * a ucontext cleanup. 
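 *
 * A minimal sketch of the intended use in a driver destroy path (the
 * object name is hypothetical, shown for illustration only):
 *
 *	ret = ib_destroy_usecnt(&obj->usecnt, why, uobj);
 *	if (ret)
 *		return ret;
 *	... tear down the hardware object and free memory ...
 *
 * Propagating the -EBUSY from ib_destroy_usecnt() lets the caller fail or
 * retry the destroy instead of tearing down an object that is still
 * referenced.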
2792 */ 2793 static inline int ib_destroy_usecnt(atomic_t *usecnt, 2794 enum rdma_remove_reason why, 2795 struct ib_uobject *uobj) 2796 { 2797 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj)) 2798 return -EBUSY; 2799 return 0; 2800 } 2801 2802 /** 2803 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2804 * contains all required attributes and no attributes not allowed for 2805 * the given QP state transition. 2806 * @cur_state: Current QP state 2807 * @next_state: Next QP state 2808 * @type: QP type 2809 * @mask: Mask of supplied QP attributes 2810 * 2811 * This function is a helper function that a low-level driver's 2812 * modify_qp method can use to validate the consumer's input. It 2813 * checks that cur_state and next_state are valid QP states, that a 2814 * transition from cur_state to next_state is allowed by the IB spec, 2815 * and that the attribute mask supplied is allowed for the transition. 2816 */ 2817 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2818 enum ib_qp_type type, enum ib_qp_attr_mask mask); 2819 2820 void ib_register_event_handler(struct ib_event_handler *event_handler); 2821 void ib_unregister_event_handler(struct ib_event_handler *event_handler); 2822 void ib_dispatch_event(struct ib_event *event); 2823 2824 int ib_query_port(struct ib_device *device, 2825 u8 port_num, struct ib_port_attr *port_attr); 2826 2827 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2828 u8 port_num); 2829 2830 /** 2831 * rdma_cap_ib_switch - Check if the device is IB switch 2832 * @device: Device to check 2833 * 2834 * Device driver is responsible for setting is_switch bit on 2835 * in ib_device structure at init time. 2836 * 2837 * Return: true if the device is IB switch. 2838 */ 2839 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2840 { 2841 return device->is_switch; 2842 } 2843 2844 /** 2845 * rdma_start_port - Return the first valid port number for the device 2846 * specified 2847 * 2848 * @device: Device to be checked 2849 * 2850 * Return start port number 2851 */ 2852 static inline u8 rdma_start_port(const struct ib_device *device) 2853 { 2854 return rdma_cap_ib_switch(device) ? 0 : 1; 2855 } 2856 2857 /** 2858 * rdma_for_each_port - Iterate over all valid port numbers of the IB device 2859 * @device - The struct ib_device * to iterate over 2860 * @iter - The unsigned int to store the port number 2861 */ 2862 #define rdma_for_each_port(device, iter) \ 2863 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \ 2864 unsigned int, iter))); \ 2865 iter <= rdma_end_port(device); (iter)++) 2866 2867 /** 2868 * rdma_end_port - Return the last valid port number for the device 2869 * specified 2870 * 2871 * @device: Device to be checked 2872 * 2873 * Return last port number 2874 */ 2875 static inline u8 rdma_end_port(const struct ib_device *device) 2876 { 2877 return rdma_cap_ib_switch(device) ? 
0 : device->phys_port_cnt; 2878 } 2879 2880 static inline int rdma_is_port_valid(const struct ib_device *device, 2881 unsigned int port) 2882 { 2883 return (port >= rdma_start_port(device) && 2884 port <= rdma_end_port(device)); 2885 } 2886 2887 static inline bool rdma_is_grh_required(const struct ib_device *device, 2888 u8 port_num) 2889 { 2890 return device->port_data[port_num].immutable.core_cap_flags & 2891 RDMA_CORE_PORT_IB_GRH_REQUIRED; 2892 } 2893 2894 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 2895 { 2896 return device->port_data[port_num].immutable.core_cap_flags & 2897 RDMA_CORE_CAP_PROT_IB; 2898 } 2899 2900 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2901 { 2902 return device->port_data[port_num].immutable.core_cap_flags & 2903 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 2904 } 2905 2906 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 2907 { 2908 return device->port_data[port_num].immutable.core_cap_flags & 2909 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 2910 } 2911 2912 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 2913 { 2914 return device->port_data[port_num].immutable.core_cap_flags & 2915 RDMA_CORE_CAP_PROT_ROCE; 2916 } 2917 2918 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 2919 { 2920 return device->port_data[port_num].immutable.core_cap_flags & 2921 RDMA_CORE_CAP_PROT_IWARP; 2922 } 2923 2924 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2925 { 2926 return rdma_protocol_ib(device, port_num) || 2927 rdma_protocol_roce(device, port_num); 2928 } 2929 2930 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) 2931 { 2932 return device->port_data[port_num].immutable.core_cap_flags & 2933 RDMA_CORE_CAP_PROT_RAW_PACKET; 2934 } 2935 2936 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) 2937 { 2938 return device->port_data[port_num].immutable.core_cap_flags & 2939 RDMA_CORE_CAP_PROT_USNIC; 2940 } 2941 2942 /** 2943 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 2944 * Management Datagrams. 2945 * @device: Device to check 2946 * @port_num: Port number to check 2947 * 2948 * Management Datagrams (MAD) are a required part of the InfiniBand 2949 * specification and are supported on all InfiniBand devices. A slightly 2950 * extended version are also supported on OPA interfaces. 2951 * 2952 * Return: true if the port supports sending/receiving of MAD packets. 2953 */ 2954 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 2955 { 2956 return device->port_data[port_num].immutable.core_cap_flags & 2957 RDMA_CORE_CAP_IB_MAD; 2958 } 2959 2960 /** 2961 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 2962 * Management Datagrams. 2963 * @device: Device to check 2964 * @port_num: Port number to check 2965 * 2966 * Intel OmniPath devices extend and/or replace the InfiniBand Management 2967 * datagrams with their own versions. These OPA MADs share many but not all of 2968 * the characteristics of InfiniBand MADs. 
2969 * 2970 * OPA MADs differ in the following ways: 2971 * 2972 * 1) MADs are variable size up to 2K 2973 * IBTA defined MADs remain fixed at 256 bytes 2974 * 2) OPA SMPs must carry valid PKeys 2975 * 3) OPA SMP packets are a different format 2976 * 2977 * Return: true if the port supports OPA MAD packet formats. 2978 */ 2979 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 2980 { 2981 return (device->port_data[port_num].immutable.core_cap_flags & 2982 RDMA_CORE_CAP_OPA_MAD) == RDMA_CORE_CAP_OPA_MAD; 2983 } 2984 2985 /** 2986 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 2987 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 2988 * @device: Device to check 2989 * @port_num: Port number to check 2990 * 2991 * Each InfiniBand node is required to provide a Subnet Management Agent 2992 * that the subnet manager can access. Prior to the fabric being fully 2993 * configured by the subnet manager, the SMA is accessed via a well known 2994 * interface called the Subnet Management Interface (SMI). This interface 2995 * uses directed route packets to communicate with the SM to get around the 2996 * chicken and egg problem of the SM needing to know what's on the fabric 2997 * in order to configure the fabric, and needing to configure the fabric in 2998 * order to send packets to the devices on the fabric. These directed 2999 * route packets do not need the fabric fully configured in order to reach 3000 * their destination. The SMI is the only method allowed to send 3001 * directed route packets on an InfiniBand fabric. 3002 * 3003 * Return: true if the port provides an SMI. 3004 */ 3005 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 3006 { 3007 return device->port_data[port_num].immutable.core_cap_flags & 3008 RDMA_CORE_CAP_IB_SMI; 3009 } 3010 3011 /** 3012 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 3013 * Communication Manager. 3014 * @device: Device to check 3015 * @port_num: Port number to check 3016 * 3017 * The InfiniBand Communication Manager is one of many pre-defined General 3018 * Service Agents (GSA) that are accessed via the General Service 3019 * Interface (GSI). Its role is to facilitate establishment of connections 3020 * between nodes as well as other management related tasks for established 3021 * connections. 3022 * 3023 * Return: true if the port supports an IB CM (this does not guarantee that 3024 * a CM is actually running however). 3025 */ 3026 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 3027 { 3028 return device->port_data[port_num].immutable.core_cap_flags & 3029 RDMA_CORE_CAP_IB_CM; 3030 } 3031 3032 /** 3033 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 3034 * Communication Manager. 3035 * @device: Device to check 3036 * @port_num: Port number to check 3037 * 3038 * Similar to above, but specific to iWARP connections which have a different 3039 * management protocol than InfiniBand. 3040 * 3041 * Return: true if the port supports an iWARP CM (this does not guarantee that 3042 * a CM is actually running however). 3043 */ 3044 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 3045 { 3046 return device->port_data[port_num].immutable.core_cap_flags & 3047 RDMA_CORE_CAP_IW_CM; 3048 } 3049 3050 /** 3051 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 3052 * Subnet Administration.
3053 * @device: Device to check 3054 * @port_num: Port number to check 3055 * 3056 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 3057 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 3058 * fabrics, devices should resolve routes to other hosts by contacting the 3059 * SA to query the proper route. 3060 * 3061 * Return: true if the port should act as a client to the fabric Subnet 3062 * Administration interface. This does not imply that the SA service is 3063 * running locally. 3064 */ 3065 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 3066 { 3067 return device->port_data[port_num].immutable.core_cap_flags & 3068 RDMA_CORE_CAP_IB_SA; 3069 } 3070 3071 /** 3072 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 3073 * Multicast. 3074 * @device: Device to check 3075 * @port_num: Port number to check 3076 * 3077 * InfiniBand multicast registration is more complex than normal IPv4 or 3078 * IPv6 multicast registration. Each Host Channel Adapter must register 3079 * with the Subnet Manager when it wishes to join a multicast group. It 3080 * should do so only once regardless of how many queue pairs it subscribes 3081 * to this group. And it should leave the group only after all queue pairs 3082 * attached to the group have been detached. 3083 * 3084 * Return: true if the port must undertake the additional administrative 3085 * overhead of registering/unregistering with the SM and tracking of the 3086 * total number of queue pairs attached to the multicast group. 3087 */ 3088 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 3089 { 3090 return rdma_cap_ib_sa(device, port_num); 3091 } 3092 3093 /** 3094 * rdma_cap_af_ib - Check if the port of device has the capability 3095 * Native Infiniband Address. 3096 * @device: Device to check 3097 * @port_num: Port number to check 3098 * 3099 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 3100 * GID. RoCE uses a different mechanism, but still generates a GID via 3101 * a prescribed mechanism and port specific data. 3102 * 3103 * Return: true if the port uses a GID address to identify devices on the 3104 * network. 3105 */ 3106 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 3107 { 3108 return device->port_data[port_num].immutable.core_cap_flags & 3109 RDMA_CORE_CAP_AF_IB; 3110 } 3111 3112 /** 3113 * rdma_cap_eth_ah - Check if the port of device has the capability 3114 * Ethernet Address Handle. 3115 * @device: Device to check 3116 * @port_num: Port number to check 3117 * 3118 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 3119 * to fabricate GIDs over Ethernet/IP specific addresses native to the 3120 * port. Normally, packet headers are generated by the sending host 3121 * adapter, but when sending connectionless datagrams, we must manually 3122 * inject the proper headers for the fabric we are communicating over. 3123 * 3124 * Return: true if we are running as a RoCE port and must force the 3125 * addition of a Global Route Header built from our Ethernet Address 3126 * Handle into our header list for connectionless packets.
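 *
 * A small sketch of how this check is typically consumed when an address
 * vector is built by hand (illustrative only; rdma_ah_set_grh() is the GRH
 * helper declared further down in this header):
 *
 *	if (rdma_cap_eth_ah(device, port_num))
 *		rdma_ah_set_grh(&ah_attr, &dgid, flow_label,
 *				sgid_index, hop_limit, traffic_class);
 *
 * i.e. on RoCE ports the GRH portion of the address handle is not optional
 * and must be filled in before the AH is created.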
3127 */ 3128 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 3129 { 3130 return device->port_data[port_num].immutable.core_cap_flags & 3131 RDMA_CORE_CAP_ETH_AH; 3132 } 3133 3134 /** 3135 * rdma_cap_opa_ah - Check if the port of device supports 3136 * OPA Address handles 3137 * @device: Device to check 3138 * @port_num: Port number to check 3139 * 3140 * Return: true if we are running on an OPA device which supports 3141 * the extended OPA addressing. 3142 */ 3143 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num) 3144 { 3145 return (device->port_data[port_num].immutable.core_cap_flags & 3146 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; 3147 } 3148 3149 /** 3150 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 3151 * 3152 * @device: Device 3153 * @port_num: Port number 3154 * 3155 * This MAD size includes the MAD headers and MAD payload. No other headers 3156 * are included. 3157 * 3158 * Return the max MAD size required by the Port. Will return 0 if the port 3159 * does not support MADs 3160 */ 3161 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 3162 { 3163 return device->port_data[port_num].immutable.max_mad_size; 3164 } 3165 3166 /** 3167 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 3168 * @device: Device to check 3169 * @port_num: Port number to check 3170 * 3171 * RoCE GID table mechanism manages the various GIDs for a device. 3172 * 3173 * NOTE: if allocating the port's GID table has failed, this call will still 3174 * return true, but any RoCE GID table API will fail. 3175 * 3176 * Return: true if the port uses RoCE GID table mechanism in order to manage 3177 * its GIDs. 3178 */ 3179 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 3180 u8 port_num) 3181 { 3182 return rdma_protocol_roce(device, port_num) && 3183 device->ops.add_gid && device->ops.del_gid; 3184 } 3185 3186 /* 3187 * Check if the device supports READ W/ INVALIDATE. 3188 */ 3189 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 3190 { 3191 /* 3192 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 3193 * has support for it yet. 3194 */ 3195 return rdma_protocol_iwarp(dev, port_num); 3196 } 3197 3198 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 3199 int state); 3200 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 3201 struct ifla_vf_info *info); 3202 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 3203 struct ifla_vf_stats *stats); 3204 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 3205 int type); 3206 3207 int ib_query_pkey(struct ib_device *device, 3208 u8 port_num, u16 index, u16 *pkey); 3209 3210 int ib_modify_device(struct ib_device *device, 3211 int device_modify_mask, 3212 struct ib_device_modify *device_modify); 3213 3214 int ib_modify_port(struct ib_device *device, 3215 u8 port_num, int port_modify_mask, 3216 struct ib_port_modify *port_modify); 3217 3218 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 3219 u8 *port_num, u16 *index); 3220 3221 int ib_find_pkey(struct ib_device *device, 3222 u8 port_num, u16 pkey, u16 *index); 3223 3224 enum ib_pd_flags { 3225 /* 3226 * Create a memory registration for all memory in the system and place 3227 * the rkey for it into pd->unsafe_global_rkey. This can be used by 3228 * ULPs to avoid the overhead of dynamic MRs. 
3229 * 3230 * This flag is generally considered unsafe and must only be used in 3231 * extremely trusted environments. Every use of it will log a warning 3232 * in the kernel log. 3233 */ 3234 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 3235 }; 3236 3237 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 3238 const char *caller); 3239 #define ib_alloc_pd(device, flags) \ 3240 __ib_alloc_pd((device), (flags), KBUILD_MODNAME) 3241 void ib_dealloc_pd(struct ib_pd *pd); 3242 3243 enum rdma_create_ah_flags { 3244 /* In a sleepable context */ 3245 RDMA_CREATE_AH_SLEEPABLE = BIT(0), 3246 }; 3247 3248 /** 3249 * rdma_create_ah - Creates an address handle for the given address vector. 3250 * @pd: The protection domain associated with the address handle. 3251 * @ah_attr: The attributes of the address vector. 3252 * @flags: Create address handle flags (see enum rdma_create_ah_flags). 3253 * 3254 * The address handle is used to reference a local or global destination 3255 * in all UD QP post sends. 3256 */ 3257 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, 3258 u32 flags); 3259 3260 /** 3261 * rdma_create_user_ah - Creates an address handle for the given address vector. 3262 * It resolves the destination MAC address for an ah attribute of RoCE type. 3263 * @pd: The protection domain associated with the address handle. 3264 * @ah_attr: The attributes of the address vector. 3265 * @udata: pointer to the user's input/output buffer information needed by the 3266 * provider driver. 3267 * 3268 * It returns a valid address handle on success and an appropriate error 3269 * pointer on error. The address handle is used to reference a local or global 3270 * destination in all UD QP post sends. 3271 */ 3272 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, 3273 struct rdma_ah_attr *ah_attr, 3274 struct ib_udata *udata); 3275 /** 3276 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header 3277 * work completion. 3278 * @hdr: the L3 header to parse 3279 * @net_type: type of header to parse 3280 * @sgid: place to store source gid 3281 * @dgid: place to store destination gid 3282 */ 3283 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 3284 enum rdma_network_type net_type, 3285 union ib_gid *sgid, union ib_gid *dgid); 3286 3287 /** 3288 * ib_get_rdma_header_version - Get the header version 3289 * @hdr: the L3 header to parse 3290 */ 3291 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); 3292 3293 /** 3294 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a 3295 * work completion. 3296 * @device: Device on which the received message arrived. 3297 * @port_num: Port on which the received message arrived. 3298 * @wc: Work completion associated with the received message. 3299 * @grh: References the received global route header. This parameter is 3300 * ignored unless the work completion indicates that the GRH is valid. 3301 * @ah_attr: Returned attributes that can be used when creating an address 3302 * handle for replying to the message. 3303 * When ib_init_ah_attr_from_wc() returns success, 3304 * (a) for IB link layer it optionally contains a reference to SGID attribute 3305 * when GRH is present for IB link layer. 3306 * (b) for RoCE link layer it contains a reference to SGID attribute. 3307 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID 3308 * attributes which are initialized using ib_init_ah_attr_from_wc().
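 *
 * A sketch of the usual responder-side pattern, releasing the SGID
 * reference as described above (error handling trimmed, illustration only):
 *
 *	struct rdma_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	ret = ib_init_ah_attr_from_wc(device, port_num, wc, grh, &ah_attr);
 *	if (ret)
 *		return ret;
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	rdma_cleanup_ah_attr_gid_attr(&ah_attr);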
3309 * 3310 */ 3311 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, 3312 const struct ib_wc *wc, const struct ib_grh *grh, 3313 struct rdma_ah_attr *ah_attr); 3314 3315 /** 3316 * ib_create_ah_from_wc - Creates an address handle associated with the 3317 * sender of the specified work completion. 3318 * @pd: The protection domain associated with the address handle. 3319 * @wc: Work completion information associated with a received message. 3320 * @grh: References the received global route header. This parameter is 3321 * ignored unless the work completion indicates that the GRH is valid. 3322 * @port_num: The outbound port number to associate with the address. 3323 * 3324 * The address handle is used to reference a local or global destination 3325 * in all UD QP post sends. 3326 */ 3327 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 3328 const struct ib_grh *grh, u8 port_num); 3329 3330 /** 3331 * rdma_modify_ah - Modifies the address vector associated with an address 3332 * handle. 3333 * @ah: The address handle to modify. 3334 * @ah_attr: The new address vector attributes to associate with the 3335 * address handle. 3336 */ 3337 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 3338 3339 /** 3340 * rdma_query_ah - Queries the address vector associated with an address 3341 * handle. 3342 * @ah: The address handle to query. 3343 * @ah_attr: The address vector attributes associated with the address 3344 * handle. 3345 */ 3346 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 3347 3348 enum rdma_destroy_ah_flags { 3349 /* In a sleepable context */ 3350 RDMA_DESTROY_AH_SLEEPABLE = BIT(0), 3351 }; 3352 3353 /** 3354 * rdma_destroy_ah - Destroys an address handle. 3355 * @ah: The address handle to destroy. 3356 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). 3357 */ 3358 int rdma_destroy_ah(struct ib_ah *ah, u32 flags); 3359 3360 /** 3361 * ib_create_srq - Creates an SRQ associated with the specified protection 3362 * domain. 3363 * @pd: The protection domain associated with the SRQ. 3364 * @srq_init_attr: A list of initial attributes required to create the 3365 * SRQ. If SRQ creation succeeds, then the attributes are updated to 3366 * the actual capabilities of the created SRQ. 3367 * 3368 * srq_attr->max_wr and srq_attr->max_sge are read to determine the 3369 * requested size of the SRQ, and set to the actual values allocated 3370 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 3371 * will always be at least as large as the requested values. 3372 */ 3373 struct ib_srq *ib_create_srq(struct ib_pd *pd, 3374 struct ib_srq_init_attr *srq_init_attr); 3375 3376 /** 3377 * ib_modify_srq - Modifies the attributes for the specified SRQ. 3378 * @srq: The SRQ to modify. 3379 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 3380 * the current values of selected SRQ attributes are returned. 3381 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 3382 * are being modified. 3383 * 3384 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 3385 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 3386 * the number of receives queued drops below the limit. 3387 */ 3388 int ib_modify_srq(struct ib_srq *srq, 3389 struct ib_srq_attr *srq_attr, 3390 enum ib_srq_attr_mask srq_attr_mask); 3391 3392 /** 3393 * ib_query_srq - Returns the attribute list and current values for the 3394 * specified SRQ.
3395 * @srq: The SRQ to query. 3396 * @srq_attr: The attributes of the specified SRQ. 3397 */ 3398 int ib_query_srq(struct ib_srq *srq, 3399 struct ib_srq_attr *srq_attr); 3400 3401 /** 3402 * ib_destroy_srq - Destroys the specified SRQ. 3403 * @srq: The SRQ to destroy. 3404 */ 3405 int ib_destroy_srq(struct ib_srq *srq); 3406 3407 /** 3408 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 3409 * @srq: The SRQ to post the work request on. 3410 * @recv_wr: A list of work requests to post on the receive queue. 3411 * @bad_recv_wr: On an immediate failure, this parameter will reference 3412 * the work request that failed to be posted on the QP. 3413 */ 3414 static inline int ib_post_srq_recv(struct ib_srq *srq, 3415 const struct ib_recv_wr *recv_wr, 3416 const struct ib_recv_wr **bad_recv_wr) 3417 { 3418 const struct ib_recv_wr *dummy; 3419 3420 return srq->device->ops.post_srq_recv(srq, recv_wr, 3421 bad_recv_wr ? : &dummy); 3422 } 3423 3424 /** 3425 * ib_create_qp - Creates a QP associated with the specified protection 3426 * domain. 3427 * @pd: The protection domain associated with the QP. 3428 * @qp_init_attr: A list of initial attributes required to create the 3429 * QP. If QP creation succeeds, then the attributes are updated to 3430 * the actual capabilities of the created QP. 3431 */ 3432 struct ib_qp *ib_create_qp(struct ib_pd *pd, 3433 struct ib_qp_init_attr *qp_init_attr); 3434 3435 /** 3436 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. 3437 * @qp: The QP to modify. 3438 * @attr: On input, specifies the QP attributes to modify. On output, 3439 * the current values of selected QP attributes are returned. 3440 * @attr_mask: A bit-mask used to specify which attributes of the QP 3441 * are being modified. 3442 * @udata: pointer to user's input output buffer information 3443 * are being modified. 3444 * It returns 0 on success and returns appropriate error code on error. 3445 */ 3446 int ib_modify_qp_with_udata(struct ib_qp *qp, 3447 struct ib_qp_attr *attr, 3448 int attr_mask, 3449 struct ib_udata *udata); 3450 3451 /** 3452 * ib_modify_qp - Modifies the attributes for the specified QP and then 3453 * transitions the QP to the given state. 3454 * @qp: The QP to modify. 3455 * @qp_attr: On input, specifies the QP attributes to modify. On output, 3456 * the current values of selected QP attributes are returned. 3457 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 3458 * are being modified. 3459 */ 3460 int ib_modify_qp(struct ib_qp *qp, 3461 struct ib_qp_attr *qp_attr, 3462 int qp_attr_mask); 3463 3464 /** 3465 * ib_query_qp - Returns the attribute list and current values for the 3466 * specified QP. 3467 * @qp: The QP to query. 3468 * @qp_attr: The attributes of the specified QP. 3469 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 3470 * @qp_init_attr: Additional attributes of the selected QP. 3471 * 3472 * The qp_attr_mask may be used to limit the query to gathering only the 3473 * selected attributes. 3474 */ 3475 int ib_query_qp(struct ib_qp *qp, 3476 struct ib_qp_attr *qp_attr, 3477 int qp_attr_mask, 3478 struct ib_qp_init_attr *qp_init_attr); 3479 3480 /** 3481 * ib_destroy_qp - Destroys the specified QP. 3482 * @qp: The QP to destroy. 3483 */ 3484 int ib_destroy_qp(struct ib_qp *qp); 3485 3486 /** 3487 * ib_open_qp - Obtain a reference to an existing sharable QP. 3488 * @xrcd - XRC domain 3489 * @qp_open_attr: Attributes identifying the QP to open. 
3490 * 3491 * Returns a reference to a sharable QP. 3492 */ 3493 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 3494 struct ib_qp_open_attr *qp_open_attr); 3495 3496 /** 3497 * ib_close_qp - Release an external reference to a QP. 3498 * @qp: The QP handle to release 3499 * 3500 * The opened QP handle is released by the caller. The underlying 3501 * shared QP is not destroyed until all internal references are released. 3502 */ 3503 int ib_close_qp(struct ib_qp *qp); 3504 3505 /** 3506 * ib_post_send - Posts a list of work requests to the send queue of 3507 * the specified QP. 3508 * @qp: The QP to post the work request on. 3509 * @send_wr: A list of work requests to post on the send queue. 3510 * @bad_send_wr: On an immediate failure, this parameter will reference 3511 * the work request that failed to be posted on the QP. 3512 * 3513 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 3514 * error is returned, the QP state shall not be affected, 3515 * ib_post_send() will return an immediate error after queueing any 3516 * earlier work requests in the list. 3517 */ 3518 static inline int ib_post_send(struct ib_qp *qp, 3519 const struct ib_send_wr *send_wr, 3520 const struct ib_send_wr **bad_send_wr) 3521 { 3522 const struct ib_send_wr *dummy; 3523 3524 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy); 3525 } 3526 3527 /** 3528 * ib_post_recv - Posts a list of work requests to the receive queue of 3529 * the specified QP. 3530 * @qp: The QP to post the work request on. 3531 * @recv_wr: A list of work requests to post on the receive queue. 3532 * @bad_recv_wr: On an immediate failure, this parameter will reference 3533 * the work request that failed to be posted on the QP. 3534 */ 3535 static inline int ib_post_recv(struct ib_qp *qp, 3536 const struct ib_recv_wr *recv_wr, 3537 const struct ib_recv_wr **bad_recv_wr) 3538 { 3539 const struct ib_recv_wr *dummy; 3540 3541 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); 3542 } 3543 3544 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, 3545 int nr_cqe, int comp_vector, 3546 enum ib_poll_context poll_ctx, const char *caller); 3547 #define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \ 3548 __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME) 3549 3550 void ib_free_cq(struct ib_cq *cq); 3551 int ib_process_cq_direct(struct ib_cq *cq, int budget); 3552 3553 /** 3554 * ib_create_cq - Creates a CQ on the specified device. 3555 * @device: The device on which to create the CQ. 3556 * @comp_handler: A user-specified callback that is invoked when a 3557 * completion event occurs on the CQ. 3558 * @event_handler: A user-specified callback that is invoked when an 3559 * asynchronous event not associated with a completion occurs on the CQ. 3560 * @cq_context: Context associated with the CQ returned to the user via 3561 * the associated completion and event handlers. 3562 * @cq_attr: The attributes the CQ should be created upon. 3563 * 3564 * Users can examine the cq structure to determine the actual CQ size. 
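 *
 * A minimal consumer sketch (the handler and context names below are
 * hypothetical):
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, my_event_handler,
 *			  my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 * Kernel ULPs are usually better served by the ib_alloc_cq()/ib_free_cq()
 * helpers declared above, which also drive completion processing.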
3565 */ 3566 struct ib_cq *__ib_create_cq(struct ib_device *device, 3567 ib_comp_handler comp_handler, 3568 void (*event_handler)(struct ib_event *, void *), 3569 void *cq_context, 3570 const struct ib_cq_init_attr *cq_attr, 3571 const char *caller); 3572 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \ 3573 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME) 3574 3575 /** 3576 * ib_resize_cq - Modifies the capacity of the CQ. 3577 * @cq: The CQ to resize. 3578 * @cqe: The minimum size of the CQ. 3579 * 3580 * Users can examine the cq structure to determine the actual CQ size. 3581 */ 3582 int ib_resize_cq(struct ib_cq *cq, int cqe); 3583 3584 /** 3585 * rdma_set_cq_moderation - Modifies moderation params of the CQ 3586 * @cq: The CQ to modify. 3587 * @cq_count: number of CQEs that will trigger an event 3588 * @cq_period: max period of time in usec before triggering an event 3589 * 3590 */ 3591 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period); 3592 3593 /** 3594 * ib_destroy_cq - Destroys the specified CQ. 3595 * @cq: The CQ to destroy. 3596 */ 3597 int ib_destroy_cq(struct ib_cq *cq); 3598 3599 /** 3600 * ib_poll_cq - poll a CQ for completion(s) 3601 * @cq:the CQ being polled 3602 * @num_entries:maximum number of completions to return 3603 * @wc:array of at least @num_entries &struct ib_wc where completions 3604 * will be returned 3605 * 3606 * Poll a CQ for (possibly multiple) completions. If the return value 3607 * is < 0, an error occurred. If the return value is >= 0, it is the 3608 * number of completions returned. If the return value is 3609 * non-negative and < num_entries, then the CQ was emptied. 3610 */ 3611 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 3612 struct ib_wc *wc) 3613 { 3614 return cq->device->ops.poll_cq(cq, num_entries, wc); 3615 } 3616 3617 /** 3618 * ib_req_notify_cq - Request completion notification on a CQ. 3619 * @cq: The CQ to generate an event for. 3620 * @flags: 3621 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 3622 * to request an event on the next solicited event or next work 3623 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 3624 * may also be |ed in to request a hint about missed events, as 3625 * described below. 3626 * 3627 * Return Value: 3628 * < 0 means an error occurred while requesting notification 3629 * == 0 means notification was requested successfully, and if 3630 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 3631 * were missed and it is safe to wait for another event. In 3632 * this case is it guaranteed that any work completions added 3633 * to the CQ since the last CQ poll will trigger a completion 3634 * notification event. 3635 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 3636 * in. It means that the consumer must poll the CQ again to 3637 * make sure it is empty to avoid missing an event because of a 3638 * race between requesting notification and an entry being 3639 * added to the CQ. This return value means it is possible 3640 * (but not guaranteed) that a work completion has been added 3641 * to the CQ since the last poll without triggering a 3642 * completion notification event. 
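 *
 * The re-arm loop implied by the above typically looks like this
 * (sketch only; handle_wc() is a hypothetical consumer function):
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);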
3643 */ 3644 static inline int ib_req_notify_cq(struct ib_cq *cq, 3645 enum ib_cq_notify_flags flags) 3646 { 3647 return cq->device->ops.req_notify_cq(cq, flags); 3648 } 3649 3650 /** 3651 * ib_req_ncomp_notif - Request completion notification when there are 3652 * at least the specified number of unreaped completions on the CQ. 3653 * @cq: The CQ to generate an event for. 3654 * @wc_cnt: The number of unreaped completions that should be on the 3655 * CQ before an event is generated. 3656 */ 3657 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3658 { 3659 return cq->device->ops.req_ncomp_notif ? 3660 cq->device->ops.req_ncomp_notif(cq, wc_cnt) : 3661 -ENOSYS; 3662 } 3663 3664 /** 3665 * ib_dma_mapping_error - check a DMA addr for error 3666 * @dev: The device for which the dma_addr was created 3667 * @dma_addr: The DMA address to check 3668 */ 3669 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3670 { 3671 return dma_mapping_error(dev->dma_device, dma_addr); 3672 } 3673 3674 /** 3675 * ib_dma_map_single - Map a kernel virtual address to DMA address 3676 * @dev: The device for which the dma_addr is to be created 3677 * @cpu_addr: The kernel virtual address 3678 * @size: The size of the region in bytes 3679 * @direction: The direction of the DMA 3680 */ 3681 static inline u64 ib_dma_map_single(struct ib_device *dev, 3682 void *cpu_addr, size_t size, 3683 enum dma_data_direction direction) 3684 { 3685 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3686 } 3687 3688 /** 3689 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 3690 * @dev: The device for which the DMA address was created 3691 * @addr: The DMA address 3692 * @size: The size of the region in bytes 3693 * @direction: The direction of the DMA 3694 */ 3695 static inline void ib_dma_unmap_single(struct ib_device *dev, 3696 u64 addr, size_t size, 3697 enum dma_data_direction direction) 3698 { 3699 dma_unmap_single(dev->dma_device, addr, size, direction); 3700 } 3701 3702 /** 3703 * ib_dma_map_page - Map a physical page to DMA address 3704 * @dev: The device for which the dma_addr is to be created 3705 * @page: The page to be mapped 3706 * @offset: The offset within the page 3707 * @size: The size of the region in bytes 3708 * @direction: The direction of the DMA 3709 */ 3710 static inline u64 ib_dma_map_page(struct ib_device *dev, 3711 struct page *page, 3712 unsigned long offset, 3713 size_t size, 3714 enum dma_data_direction direction) 3715 { 3716 return dma_map_page(dev->dma_device, page, offset, size, direction); 3717 } 3718 3719 /** 3720 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 3721 * @dev: The device for which the DMA address was created 3722 * @addr: The DMA address 3723 * @size: The size of the region in bytes 3724 * @direction: The direction of the DMA 3725 */ 3726 static inline void ib_dma_unmap_page(struct ib_device *dev, 3727 u64 addr, size_t size, 3728 enum dma_data_direction direction) 3729 { 3730 dma_unmap_page(dev->dma_device, addr, size, direction); 3731 } 3732 3733 /** 3734 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 3735 * @dev: The device for which the DMA addresses are to be created 3736 * @sg: The array of scatter/gather entries 3737 * @nents: The number of scatter/gather entries 3738 * @direction: The direction of the DMA 3739 */ 3740 static inline int ib_dma_map_sg(struct ib_device *dev, 3741 struct scatterlist *sg, int nents, 3742 enum dma_data_direction direction) 3743 { 3744 
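	/*
	 * Note: the ib_dma_* helpers are thin wrappers around the generic
	 * DMA API operating on dev->dma_device. As with dma_map_sg(), a
	 * return value of 0 indicates that the mapping failed.
	 */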
return dma_map_sg(dev->dma_device, sg, nents, direction); 3745 } 3746 3747 /** 3748 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 3749 * @dev: The device for which the DMA addresses were created 3750 * @sg: The array of scatter/gather entries 3751 * @nents: The number of scatter/gather entries 3752 * @direction: The direction of the DMA 3753 */ 3754 static inline void ib_dma_unmap_sg(struct ib_device *dev, 3755 struct scatterlist *sg, int nents, 3756 enum dma_data_direction direction) 3757 { 3758 dma_unmap_sg(dev->dma_device, sg, nents, direction); 3759 } 3760 3761 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3762 struct scatterlist *sg, int nents, 3763 enum dma_data_direction direction, 3764 unsigned long dma_attrs) 3765 { 3766 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3767 dma_attrs); 3768 } 3769 3770 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3771 struct scatterlist *sg, int nents, 3772 enum dma_data_direction direction, 3773 unsigned long dma_attrs) 3774 { 3775 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); 3776 } 3777 3778 /** 3779 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer 3780 * @dev: The device to query 3781 * 3782 * The returned value represents a size in bytes. 3783 */ 3784 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev) 3785 { 3786 struct device_dma_parameters *p = dev->dma_device->dma_parms; 3787 3788 return p ? p->max_segment_size : UINT_MAX; 3789 } 3790 3791 /** 3792 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 3793 * @dev: The device for which the DMA address was created 3794 * @addr: The DMA address 3795 * @size: The size of the region in bytes 3796 * @dir: The direction of the DMA 3797 */ 3798 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 3799 u64 addr, 3800 size_t size, 3801 enum dma_data_direction dir) 3802 { 3803 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3804 } 3805 3806 /** 3807 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 3808 * @dev: The device for which the DMA address was created 3809 * @addr: The DMA address 3810 * @size: The size of the region in bytes 3811 * @dir: The direction of the DMA 3812 */ 3813 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 3814 u64 addr, 3815 size_t size, 3816 enum dma_data_direction dir) 3817 { 3818 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3819 } 3820 3821 /** 3822 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 3823 * @dev: The device for which the DMA address is requested 3824 * @size: The size of the region to allocate in bytes 3825 * @dma_handle: A pointer for returning the DMA address of the region 3826 * @flag: memory allocator flags 3827 */ 3828 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3829 size_t size, 3830 dma_addr_t *dma_handle, 3831 gfp_t flag) 3832 { 3833 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); 3834 } 3835 3836 /** 3837 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 3838 * @dev: The device for which the DMA addresses were allocated 3839 * @size: The size of the region 3840 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 3841 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 3842 */ 3843 static inline void ib_dma_free_coherent(struct ib_device *dev, 3844 size_t size, void *cpu_addr, 3845 dma_addr_t dma_handle) 3846 
{
3847 	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3848 }
3849 
3850 /**
3851  * ib_dereg_mr - Deregisters a memory region and removes it from the
3852  *   HCA translation table.
3853  * @mr: The memory region to deregister.
3854  *
3855  * This function can fail if the memory region has memory windows bound to it.
3856  */
3857 int ib_dereg_mr(struct ib_mr *mr);
3858 
3859 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3860 			  enum ib_mr_type mr_type,
3861 			  u32 max_num_sg);
3862 
3863 /**
3864  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3865  *   R_Key and L_Key.
3866  * @mr: struct ib_mr pointer to be updated.
3867  * @newkey: new key to be used.
3868  */
3869 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3870 {
3871 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3872 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3873 }
3874 
3875 /**
3876  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3877  *   for calculating a new rkey for type 2 memory windows.
3878  * @rkey: the rkey to increment.
3879  */
3880 static inline u32 ib_inc_rkey(u32 rkey)
3881 {
3882 	const u32 mask = 0x000000ff;
3883 	return ((rkey + 1) & mask) | (rkey & ~mask);
3884 }
3885 
3886 /**
3887  * ib_alloc_fmr - Allocates an unmapped fast memory region.
3888  * @pd: The protection domain associated with the unmapped region.
3889  * @mr_access_flags: Specifies the memory access rights.
3890  * @fmr_attr: Attributes of the unmapped region.
3891  *
3892  * A fast memory region must be mapped before it can be used as part of
3893  * a work request.
3894  */
3895 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3896 			    int mr_access_flags,
3897 			    struct ib_fmr_attr *fmr_attr);
3898 
3899 /**
3900  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3901  * @fmr: The fast memory region to associate with the pages.
3902  * @page_list: An array of physical pages to map to the fast memory region.
3903  * @list_len: The number of pages in page_list.
3904  * @iova: The I/O virtual address to use with the mapped region.
3905  */
3906 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3907 				  u64 *page_list, int list_len,
3908 				  u64 iova)
3909 {
3910 	return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
3911 }
3912 
3913 /**
3914  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3915  * @fmr_list: A linked list of fast memory regions to unmap.
3916  */
3917 int ib_unmap_fmr(struct list_head *fmr_list);
3918 
3919 /**
3920  * ib_dealloc_fmr - Deallocates a fast memory region.
3921  * @fmr: The fast memory region to deallocate.
3922  */
3923 int ib_dealloc_fmr(struct ib_fmr *fmr);
3924 
3925 /**
3926  * ib_attach_mcast - Attaches the specified QP to a multicast group.
3927  * @qp: QP to attach to the multicast group. The QP must be type
3928  *   IB_QPT_UD.
3929  * @gid: Multicast group GID.
3930  * @lid: Multicast group LID in host byte order.
3931  *
3932  * In order to send and receive multicast packets, subnet
3933  * administration must have created the multicast group and configured
3934  * the fabric appropriately. The port associated with the specified
3935  * QP must also be a member of the multicast group.
3936  */
3937 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3938 
3939 /**
3940  * ib_detach_mcast - Detaches the specified QP from a multicast group.
3941  * @qp: QP to detach from the multicast group.
3942  * @gid: Multicast group GID.
3943  * @lid: Multicast group LID in host byte order.
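 *
 * Example (editor's illustrative sketch, not part of the original header):
 * a UD QP that joined a group via ib_attach_mcast() is normally detached
 * with the same GID and LID before the QP is destroyed, where "mcast_gid"
 * and "mcast_lid" are the values used at attach time:
 *
 *	ret = ib_detach_mcast(qp, &mcast_gid, mcast_lid);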
3944  */
3945 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3946 
3947 /**
3948  * ib_alloc_xrcd - Allocates an XRC domain.
3949  * @device: The device on which to allocate the XRC domain.
3950  * @caller: Module name for kernel consumers
3951  */
3952 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
3953 #define ib_alloc_xrcd(device) \
3954 	__ib_alloc_xrcd((device), KBUILD_MODNAME)
3955 
3956 /**
3957  * ib_dealloc_xrcd - Deallocates an XRC domain.
3958  * @xrcd: The XRC domain to deallocate.
3959  */
3960 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3961 
3962 static inline int ib_check_mr_access(int flags)
3963 {
3964 	/*
3965 	 * Local write permission is required if remote write or
3966 	 * remote atomic permission is also requested.
3967 	 */
3968 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3969 	    !(flags & IB_ACCESS_LOCAL_WRITE))
3970 		return -EINVAL;
3971 
3972 	return 0;
3973 }
3974 
3975 static inline bool ib_access_writable(int access_flags)
3976 {
3977 	/*
3978 	 * We have writable memory backing the MR if any of the following
3979 	 * access flags are set. "Local write" and "remote write" obviously
3980 	 * require write access. "Remote atomic" can do things like fetch and
3981 	 * add, which will modify memory, and "MW bind" can change permissions
3982 	 * by binding a window.
3983 	 */
3984 	return access_flags &
3985 		(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
3986 		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
3987 }
3988 
3989 /**
3990  * ib_check_mr_status - Lightweight check of MR status.
3991  * This routine may provide status checks on a selected
3992  * ib_mr. The first use is for signature status checks.
3993  *
3994  * @mr: A memory region.
3995  * @check_mask: Bitmask of which checks to perform from
3996  *   ib_mr_status_check enumeration.
3997  * @mr_status: The container of relevant status checks.
3998  *   Failed checks will be indicated in the status bitmask
3999  *   and the relevant info shall be in the error item.
4000  */
4001 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4002 		       struct ib_mr_status *mr_status);
4003 
4004 /**
4005  * ib_device_try_get - Hold a registration lock
4006  * @dev: The device to lock
4007  *
4008  * A device under an active registration lock cannot become unregistered. It
4009  * is only possible to obtain a registration lock on a device that is fully
4010  * registered, otherwise this function returns false.
4011  *
4012  * The registration lock is only necessary for actions which require the
4013  * device to still be registered. Uses that only require the device pointer to
4014  * be valid should use get_device(&ibdev->dev) to hold the memory.
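 *
 * Example (editor's illustrative sketch, not part of the original header),
 * where "do_registered_work" is a placeholder for an operation that needs
 * the device to remain registered:
 *
 *	if (!ib_device_try_get(ibdev))
 *		return -ENODEV;
 *	do_registered_work(ibdev);
 *	ib_device_put(ibdev);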
4015 * 4016 */ 4017 static inline bool ib_device_try_get(struct ib_device *dev) 4018 { 4019 return refcount_inc_not_zero(&dev->refcount); 4020 } 4021 4022 void ib_device_put(struct ib_device *device); 4023 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, 4024 enum rdma_driver_id driver_id); 4025 struct ib_device *ib_device_get_by_name(const char *name, 4026 enum rdma_driver_id driver_id); 4027 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 4028 u16 pkey, const union ib_gid *gid, 4029 const struct sockaddr *addr); 4030 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 4031 unsigned int port); 4032 struct net_device *ib_device_netdev(struct ib_device *dev, u8 port); 4033 4034 struct ib_wq *ib_create_wq(struct ib_pd *pd, 4035 struct ib_wq_init_attr *init_attr); 4036 int ib_destroy_wq(struct ib_wq *wq); 4037 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 4038 u32 wq_attr_mask); 4039 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 4040 struct ib_rwq_ind_table_init_attr* 4041 wq_ind_table_init_attr); 4042 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 4043 4044 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 4045 unsigned int *sg_offset, unsigned int page_size); 4046 4047 static inline int 4048 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 4049 unsigned int *sg_offset, unsigned int page_size) 4050 { 4051 int n; 4052 4053 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 4054 mr->iova = 0; 4055 4056 return n; 4057 } 4058 4059 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 4060 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 4061 4062 void ib_drain_rq(struct ib_qp *qp); 4063 void ib_drain_sq(struct ib_qp *qp); 4064 void ib_drain_qp(struct ib_qp *qp); 4065 4066 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width); 4067 4068 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) 4069 { 4070 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE) 4071 return attr->roce.dmac; 4072 return NULL; 4073 } 4074 4075 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid) 4076 { 4077 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4078 attr->ib.dlid = (u16)dlid; 4079 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4080 attr->opa.dlid = dlid; 4081 } 4082 4083 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr) 4084 { 4085 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4086 return attr->ib.dlid; 4087 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4088 return attr->opa.dlid; 4089 return 0; 4090 } 4091 4092 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl) 4093 { 4094 attr->sl = sl; 4095 } 4096 4097 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr) 4098 { 4099 return attr->sl; 4100 } 4101 4102 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr, 4103 u8 src_path_bits) 4104 { 4105 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4106 attr->ib.src_path_bits = src_path_bits; 4107 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4108 attr->opa.src_path_bits = src_path_bits; 4109 } 4110 4111 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr) 4112 { 4113 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4114 return attr->ib.src_path_bits; 4115 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4116 return attr->opa.src_path_bits; 4117 return 0; 4118 } 4119 4120 static inline void 
rdma_ah_set_make_grd(struct rdma_ah_attr *attr, 4121 bool make_grd) 4122 { 4123 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4124 attr->opa.make_grd = make_grd; 4125 } 4126 4127 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr) 4128 { 4129 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4130 return attr->opa.make_grd; 4131 return false; 4132 } 4133 4134 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num) 4135 { 4136 attr->port_num = port_num; 4137 } 4138 4139 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) 4140 { 4141 return attr->port_num; 4142 } 4143 4144 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr, 4145 u8 static_rate) 4146 { 4147 attr->static_rate = static_rate; 4148 } 4149 4150 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr) 4151 { 4152 return attr->static_rate; 4153 } 4154 4155 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr, 4156 enum ib_ah_flags flag) 4157 { 4158 attr->ah_flags = flag; 4159 } 4160 4161 static inline enum ib_ah_flags 4162 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr) 4163 { 4164 return attr->ah_flags; 4165 } 4166 4167 static inline const struct ib_global_route 4168 *rdma_ah_read_grh(const struct rdma_ah_attr *attr) 4169 { 4170 return &attr->grh; 4171 } 4172 4173 /*To retrieve and modify the grh */ 4174 static inline struct ib_global_route 4175 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr) 4176 { 4177 return &attr->grh; 4178 } 4179 4180 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid) 4181 { 4182 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4183 4184 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid)); 4185 } 4186 4187 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr, 4188 __be64 prefix) 4189 { 4190 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4191 4192 grh->dgid.global.subnet_prefix = prefix; 4193 } 4194 4195 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr, 4196 __be64 if_id) 4197 { 4198 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4199 4200 grh->dgid.global.interface_id = if_id; 4201 } 4202 4203 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr, 4204 union ib_gid *dgid, u32 flow_label, 4205 u8 sgid_index, u8 hop_limit, 4206 u8 traffic_class) 4207 { 4208 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4209 4210 attr->ah_flags = IB_AH_GRH; 4211 if (dgid) 4212 grh->dgid = *dgid; 4213 grh->flow_label = flow_label; 4214 grh->sgid_index = sgid_index; 4215 grh->hop_limit = hop_limit; 4216 grh->traffic_class = traffic_class; 4217 grh->sgid_attr = NULL; 4218 } 4219 4220 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr); 4221 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid, 4222 u32 flow_label, u8 hop_limit, u8 traffic_class, 4223 const struct ib_gid_attr *sgid_attr); 4224 void rdma_copy_ah_attr(struct rdma_ah_attr *dest, 4225 const struct rdma_ah_attr *src); 4226 void rdma_replace_ah_attr(struct rdma_ah_attr *old, 4227 const struct rdma_ah_attr *new); 4228 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src); 4229 4230 /** 4231 * rdma_ah_find_type - Return address handle type. 
4232  *
4233  * @dev: Device to be checked
4234  * @port_num: Port number
4235  */
4236 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4237 						       u8 port_num)
4238 {
4239 	if (rdma_protocol_roce(dev, port_num))
4240 		return RDMA_AH_ATTR_TYPE_ROCE;
4241 	if (rdma_protocol_ib(dev, port_num)) {
4242 		if (rdma_cap_opa_ah(dev, port_num))
4243 			return RDMA_AH_ATTR_TYPE_OPA;
4244 		return RDMA_AH_ATTR_TYPE_IB;
4245 	}
4246 
4247 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4248 }
4249 
4250 /**
4251  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4252  * In the current implementation the only way to get
4253  * the 32bit lid is from other sources for OPA.
4254  * For IB, lids will always be 16bits so cast the
4255  * value accordingly.
4256  *
4257  * @lid: A 32bit LID
4258  */
4259 static inline u16 ib_lid_cpu16(u32 lid)
4260 {
4261 	WARN_ON_ONCE(lid & 0xFFFF0000);
4262 	return (u16)lid;
4263 }
4264 
4265 /**
4266  * ib_lid_be16 - Return lid in 16bit BE encoding.
4267  *
4268  * @lid: A 32bit LID
4269  */
4270 static inline __be16 ib_lid_be16(u32 lid)
4271 {
4272 	WARN_ON_ONCE(lid & 0xFFFF0000);
4273 	return cpu_to_be16((u16)lid);
4274 }
4275 
4276 /**
4277  * ib_get_vector_affinity - Get the affinity mappings of a given completion
4278  *   vector
4279  * @device: the rdma device
4280  * @comp_vector: index of completion vector
4281  *
4282  * Returns the cpu map of the completion vector on success, otherwise
4283  * NULL if @comp_vector is out of range or if the device driver does
4284  * not implement get_vector_affinity.
4285  */
4286 static inline const struct cpumask *
4287 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4288 {
4289 	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4290 	    !device->ops.get_vector_affinity)
4291 		return NULL;
4292 
4293 	return device->ops.get_vector_affinity(device, comp_vector);
4294 
4295 }
4296 
4297 /**
4298  * rdma_roce_rescan_device - Rescan all of the network devices in the system
4299  * and add their gids, as needed, to the relevant RoCE devices.
4300  *
4301  * @ibdev: the rdma device
4302  */
4303 void rdma_roce_rescan_device(struct ib_device *ibdev);
4304 
4305 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4306 
4307 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4308 
4309 struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4310 				     enum rdma_netdev_t type, const char *name,
4311 				     unsigned char name_assign_type,
4312 				     void (*setup)(struct net_device *));
4313 
4314 int rdma_init_netdev(struct ib_device *device, u8 port_num,
4315 		     enum rdma_netdev_t type, const char *name,
4316 		     unsigned char name_assign_type,
4317 		     void (*setup)(struct net_device *),
4318 		     struct net_device *netdev);
4319 
4320 /**
4321  * rdma_set_device_sysfs_group - Set device attributes group to have
4322  * driver specific sysfs entries under
4323  * the infiniband class directory.
4324  *
4325  * @dev: device pointer for which the attributes are to be created
4326  * @group: Pointer to group which should be added when device
4327  * is registered with sysfs.
4328  * rdma_set_device_sysfs_group() allows existing drivers to expose one
4329  * group per device to have sysfs attributes.
4330  *
4331  * NOTE: New drivers should not make use of this API; instead new device
4332  * parameters should be exposed via netlink commands. This API and mechanism
4333  * exist only for existing drivers.
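 *
 * Example (editor's illustrative sketch, not part of the original header):
 * an existing driver would typically point at a static attribute group
 * before registering its device, where "my_attrs" is the driver's
 * attribute array:
 *
 *	static const struct attribute_group my_attr_group = {
 *		.attrs = my_attrs,
 *	};
 *
 *	rdma_set_device_sysfs_group(ibdev, &my_attr_group);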
4334 */ 4335 static inline void 4336 rdma_set_device_sysfs_group(struct ib_device *dev, 4337 const struct attribute_group *group) 4338 { 4339 dev->groups[1] = group; 4340 } 4341 4342 /** 4343 * rdma_device_to_ibdev - Get ib_device pointer from device pointer 4344 * 4345 * @device: device pointer for which ib_device pointer to retrieve 4346 * 4347 * rdma_device_to_ibdev() retrieves ib_device pointer from device. 4348 * 4349 */ 4350 static inline struct ib_device *rdma_device_to_ibdev(struct device *device) 4351 { 4352 return container_of(device, struct ib_device, dev); 4353 } 4354 4355 /** 4356 * rdma_device_to_drv_device - Helper macro to reach back to driver's 4357 * ib_device holder structure from device pointer. 4358 * 4359 * NOTE: New drivers should not make use of this API; This API is only for 4360 * existing drivers who have exposed sysfs entries using 4361 * rdma_set_device_sysfs_group(). 4362 */ 4363 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \ 4364 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member) 4365 #endif /* IB_VERBS_H */ 4366