1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #if !defined(IB_VERBS_H) 40 #define IB_VERBS_H 41 42 #include <linux/types.h> 43 #include <linux/device.h> 44 #include <linux/mm.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/kref.h> 47 #include <linux/list.h> 48 #include <linux/rwsem.h> 49 #include <linux/scatterlist.h> 50 #include <linux/workqueue.h> 51 #include <linux/socket.h> 52 #include <linux/irq_poll.h> 53 #include <uapi/linux/if_ether.h> 54 #include <net/ipv6.h> 55 #include <net/ip.h> 56 #include <linux/string.h> 57 #include <linux/slab.h> 58 59 #include <linux/if_link.h> 60 #include <linux/atomic.h> 61 #include <linux/mmu_notifier.h> 62 #include <linux/uaccess.h> 63 64 extern struct workqueue_struct *ib_wq; 65 extern struct workqueue_struct *ib_comp_wq; 66 67 union ib_gid { 68 u8 raw[16]; 69 struct { 70 __be64 subnet_prefix; 71 __be64 interface_id; 72 } global; 73 }; 74 75 extern union ib_gid zgid; 76 77 enum ib_gid_type { 78 /* If link layer is Ethernet, this is RoCE V1 */ 79 IB_GID_TYPE_IB = 0, 80 IB_GID_TYPE_ROCE = 0, 81 IB_GID_TYPE_ROCE_UDP_ENCAP = 1, 82 IB_GID_TYPE_SIZE 83 }; 84 85 #define ROCE_V2_UDP_DPORT 4791 86 struct ib_gid_attr { 87 enum ib_gid_type gid_type; 88 struct net_device *ndev; 89 }; 90 91 enum rdma_node_type { 92 /* IB values map to NodeInfo:NodeType. 
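	 * NodeType 1 is a channel adapter, 2 a switch and 3 a router, which
	 * matches the first three values below.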
*/ 93 RDMA_NODE_IB_CA = 1, 94 RDMA_NODE_IB_SWITCH, 95 RDMA_NODE_IB_ROUTER, 96 RDMA_NODE_RNIC, 97 RDMA_NODE_USNIC, 98 RDMA_NODE_USNIC_UDP, 99 }; 100 101 enum { 102 /* set the local administered indication */ 103 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, 104 }; 105 106 enum rdma_transport_type { 107 RDMA_TRANSPORT_IB, 108 RDMA_TRANSPORT_IWARP, 109 RDMA_TRANSPORT_USNIC, 110 RDMA_TRANSPORT_USNIC_UDP 111 }; 112 113 enum rdma_protocol_type { 114 RDMA_PROTOCOL_IB, 115 RDMA_PROTOCOL_IBOE, 116 RDMA_PROTOCOL_IWARP, 117 RDMA_PROTOCOL_USNIC_UDP 118 }; 119 120 __attribute_const__ enum rdma_transport_type 121 rdma_node_get_transport(enum rdma_node_type node_type); 122 123 enum rdma_network_type { 124 RDMA_NETWORK_IB, 125 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, 126 RDMA_NETWORK_IPV4, 127 RDMA_NETWORK_IPV6 128 }; 129 130 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type) 131 { 132 if (network_type == RDMA_NETWORK_IPV4 || 133 network_type == RDMA_NETWORK_IPV6) 134 return IB_GID_TYPE_ROCE_UDP_ENCAP; 135 136 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ 137 return IB_GID_TYPE_IB; 138 } 139 140 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type, 141 union ib_gid *gid) 142 { 143 if (gid_type == IB_GID_TYPE_IB) 144 return RDMA_NETWORK_IB; 145 146 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) 147 return RDMA_NETWORK_IPV4; 148 else 149 return RDMA_NETWORK_IPV6; 150 } 151 152 enum rdma_link_layer { 153 IB_LINK_LAYER_UNSPECIFIED, 154 IB_LINK_LAYER_INFINIBAND, 155 IB_LINK_LAYER_ETHERNET, 156 }; 157 158 enum ib_device_cap_flags { 159 IB_DEVICE_RESIZE_MAX_WR = (1 << 0), 160 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1), 161 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2), 162 IB_DEVICE_RAW_MULTI = (1 << 3), 163 IB_DEVICE_AUTO_PATH_MIG = (1 << 4), 164 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5), 165 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), 166 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), 167 IB_DEVICE_SHUTDOWN_PORT = (1 << 8), 168 IB_DEVICE_INIT_TYPE = (1 << 9), 169 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), 170 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), 171 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), 172 IB_DEVICE_SRQ_RESIZE = (1 << 13), 173 IB_DEVICE_N_NOTIFY_CQ = (1 << 14), 174 175 /* 176 * This device supports a per-device lkey or stag that can be 177 * used without performing a memory registration for the local 178 * memory. Note that ULPs should never check this flag, but 179 * instead of use the local_dma_lkey flag in the ib_pd structure, 180 * which will always contain a usable lkey. 181 */ 182 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), 183 IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16), 184 IB_DEVICE_MEM_WINDOW = (1 << 17), 185 /* 186 * Devices should set IB_DEVICE_UD_IP_SUM if they support 187 * insertion of UDP and TCP checksum on outgoing UD IPoIB 188 * messages and can verify the validity of checksum for 189 * incoming messages. Setting this flag implies that the 190 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 191 */ 192 IB_DEVICE_UD_IP_CSUM = (1 << 18), 193 IB_DEVICE_UD_TSO = (1 << 19), 194 IB_DEVICE_XRC = (1 << 20), 195 196 /* 197 * This device supports the IB "base memory management extension", 198 * which includes support for fast registrations (IB_WR_REG_MR, 199 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should 200 * also be set by any iWarp device which must support FRs to comply 201 * to the iWarp verbs spec. iWarp devices also support the 202 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the 203 * stag. 
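	 *
	 * (Illustrative note, not from the original header:) a consumer that
	 * relies on this capability typically registers pages with an
	 * IB_WR_REG_MR work request, performs the transfer, and then
	 * invalidates the registration with IB_WR_LOCAL_INV, or has the peer
	 * do so remotely via IB_WR_SEND_WITH_INV.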
204 */ 205 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), 206 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), 207 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), 208 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), 209 IB_DEVICE_RC_IP_CSUM = (1 << 25), 210 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */ 211 IB_DEVICE_RAW_IP_CSUM = (1 << 26), 212 /* 213 * Devices should set IB_DEVICE_CROSS_CHANNEL if they 214 * support execution of WQEs that involve synchronization 215 * of I/O operations with single completion queue managed 216 * by hardware. 217 */ 218 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 219 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 220 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 221 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), 222 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 223 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), 224 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ 225 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), 226 }; 227 228 enum ib_signature_prot_cap { 229 IB_PROT_T10DIF_TYPE_1 = 1, 230 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 231 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 232 }; 233 234 enum ib_signature_guard_cap { 235 IB_GUARD_T10DIF_CRC = 1, 236 IB_GUARD_T10DIF_CSUM = 1 << 1, 237 }; 238 239 enum ib_atomic_cap { 240 IB_ATOMIC_NONE, 241 IB_ATOMIC_HCA, 242 IB_ATOMIC_GLOB 243 }; 244 245 enum ib_odp_general_cap_bits { 246 IB_ODP_SUPPORT = 1 << 0, 247 IB_ODP_SUPPORT_IMPLICIT = 1 << 1, 248 }; 249 250 enum ib_odp_transport_cap_bits { 251 IB_ODP_SUPPORT_SEND = 1 << 0, 252 IB_ODP_SUPPORT_RECV = 1 << 1, 253 IB_ODP_SUPPORT_WRITE = 1 << 2, 254 IB_ODP_SUPPORT_READ = 1 << 3, 255 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 256 }; 257 258 struct ib_odp_caps { 259 uint64_t general_caps; 260 struct { 261 uint32_t rc_odp_caps; 262 uint32_t uc_odp_caps; 263 uint32_t ud_odp_caps; 264 } per_transport_caps; 265 }; 266 267 struct ib_rss_caps { 268 /* Corresponding bit will be set if qp type from 269 * 'enum ib_qp_type' is supported, e.g. 
270 * supported_qpts |= 1 << IB_QPT_UD 271 */ 272 u32 supported_qpts; 273 u32 max_rwq_indirection_tables; 274 u32 max_rwq_indirection_table_size; 275 }; 276 277 enum ib_cq_creation_flags { 278 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 279 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, 280 }; 281 282 struct ib_cq_init_attr { 283 unsigned int cqe; 284 int comp_vector; 285 u32 flags; 286 }; 287 288 struct ib_device_attr { 289 u64 fw_ver; 290 __be64 sys_image_guid; 291 u64 max_mr_size; 292 u64 page_size_cap; 293 u32 vendor_id; 294 u32 vendor_part_id; 295 u32 hw_ver; 296 int max_qp; 297 int max_qp_wr; 298 u64 device_cap_flags; 299 int max_sge; 300 int max_sge_rd; 301 int max_cq; 302 int max_cqe; 303 int max_mr; 304 int max_pd; 305 int max_qp_rd_atom; 306 int max_ee_rd_atom; 307 int max_res_rd_atom; 308 int max_qp_init_rd_atom; 309 int max_ee_init_rd_atom; 310 enum ib_atomic_cap atomic_cap; 311 enum ib_atomic_cap masked_atomic_cap; 312 int max_ee; 313 int max_rdd; 314 int max_mw; 315 int max_raw_ipv6_qp; 316 int max_raw_ethy_qp; 317 int max_mcast_grp; 318 int max_mcast_qp_attach; 319 int max_total_mcast_qp_attach; 320 int max_ah; 321 int max_fmr; 322 int max_map_per_fmr; 323 int max_srq; 324 int max_srq_wr; 325 int max_srq_sge; 326 unsigned int max_fast_reg_page_list_len; 327 u16 max_pkeys; 328 u8 local_ca_ack_delay; 329 int sig_prot_cap; 330 int sig_guard_cap; 331 struct ib_odp_caps odp_caps; 332 uint64_t timestamp_mask; 333 uint64_t hca_core_clock; /* in KHZ */ 334 struct ib_rss_caps rss_caps; 335 u32 max_wq_type_rq; 336 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ 337 }; 338 339 enum ib_mtu { 340 IB_MTU_256 = 1, 341 IB_MTU_512 = 2, 342 IB_MTU_1024 = 3, 343 IB_MTU_2048 = 4, 344 IB_MTU_4096 = 5 345 }; 346 347 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 348 { 349 switch (mtu) { 350 case IB_MTU_256: return 256; 351 case IB_MTU_512: return 512; 352 case IB_MTU_1024: return 1024; 353 case IB_MTU_2048: return 2048; 354 case IB_MTU_4096: return 4096; 355 default: return -1; 356 } 357 } 358 359 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu) 360 { 361 if (mtu >= 4096) 362 return IB_MTU_4096; 363 else if (mtu >= 2048) 364 return IB_MTU_2048; 365 else if (mtu >= 1024) 366 return IB_MTU_1024; 367 else if (mtu >= 512) 368 return IB_MTU_512; 369 else 370 return IB_MTU_256; 371 } 372 373 enum ib_port_state { 374 IB_PORT_NOP = 0, 375 IB_PORT_DOWN = 1, 376 IB_PORT_INIT = 2, 377 IB_PORT_ARMED = 3, 378 IB_PORT_ACTIVE = 4, 379 IB_PORT_ACTIVE_DEFER = 5 380 }; 381 382 enum ib_port_cap_flags { 383 IB_PORT_SM = 1 << 1, 384 IB_PORT_NOTICE_SUP = 1 << 2, 385 IB_PORT_TRAP_SUP = 1 << 3, 386 IB_PORT_OPT_IPD_SUP = 1 << 4, 387 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 388 IB_PORT_SL_MAP_SUP = 1 << 6, 389 IB_PORT_MKEY_NVRAM = 1 << 7, 390 IB_PORT_PKEY_NVRAM = 1 << 8, 391 IB_PORT_LED_INFO_SUP = 1 << 9, 392 IB_PORT_SM_DISABLED = 1 << 10, 393 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, 394 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, 395 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, 396 IB_PORT_CM_SUP = 1 << 16, 397 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, 398 IB_PORT_REINIT_SUP = 1 << 18, 399 IB_PORT_DEVICE_MGMT_SUP = 1 << 19, 400 IB_PORT_VENDOR_CLASS_SUP = 1 << 20, 401 IB_PORT_DR_NOTICE_SUP = 1 << 21, 402 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 403 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 404 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 405 IB_PORT_CLIENT_REG_SUP = 1 << 25, 406 IB_PORT_IP_BASED_GIDS = 1 << 26, 407 }; 408 409 enum ib_port_width { 410 IB_WIDTH_1X = 1, 411 IB_WIDTH_4X = 2, 412 IB_WIDTH_8X = 4, 413 IB_WIDTH_12X = 8 414 }; 415 416 static inline 
int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to the static names used for the counters
 *   in the sysfs directory.
 * @num_counters - How many hardware counters there are.  If @names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}


/* Define bits for the various functionality this port needs to be supported by
 * the core.
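 *
 * The composite RDMA_CORE_PORT_* values below are simply ORs of these bits;
 * for example RDMA_CORE_PORT_IBA_ROCE combines RDMA_CORE_CAP_PROT_ROCE with
 * the MAD, CM, address-format and Ethernet AH bits.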
488 */ 489 /* Management 0x00000FFF */ 490 #define RDMA_CORE_CAP_IB_MAD 0x00000001 491 #define RDMA_CORE_CAP_IB_SMI 0x00000002 492 #define RDMA_CORE_CAP_IB_CM 0x00000004 493 #define RDMA_CORE_CAP_IW_CM 0x00000008 494 #define RDMA_CORE_CAP_IB_SA 0x00000010 495 #define RDMA_CORE_CAP_OPA_MAD 0x00000020 496 497 /* Address format 0x000FF000 */ 498 #define RDMA_CORE_CAP_AF_IB 0x00001000 499 #define RDMA_CORE_CAP_ETH_AH 0x00002000 500 501 /* Protocol 0xFFF00000 */ 502 #define RDMA_CORE_CAP_PROT_IB 0x00100000 503 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 504 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 505 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 506 #define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000 507 #define RDMA_CORE_CAP_PROT_USNIC 0x02000000 508 509 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 510 | RDMA_CORE_CAP_IB_MAD \ 511 | RDMA_CORE_CAP_IB_SMI \ 512 | RDMA_CORE_CAP_IB_CM \ 513 | RDMA_CORE_CAP_IB_SA \ 514 | RDMA_CORE_CAP_AF_IB) 515 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 516 | RDMA_CORE_CAP_IB_MAD \ 517 | RDMA_CORE_CAP_IB_CM \ 518 | RDMA_CORE_CAP_AF_IB \ 519 | RDMA_CORE_CAP_ETH_AH) 520 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ 521 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ 522 | RDMA_CORE_CAP_IB_MAD \ 523 | RDMA_CORE_CAP_IB_CM \ 524 | RDMA_CORE_CAP_AF_IB \ 525 | RDMA_CORE_CAP_ETH_AH) 526 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 527 | RDMA_CORE_CAP_IW_CM) 528 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 529 | RDMA_CORE_CAP_OPA_MAD) 530 531 #define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET) 532 533 #define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC) 534 535 struct ib_port_attr { 536 u64 subnet_prefix; 537 enum ib_port_state state; 538 enum ib_mtu max_mtu; 539 enum ib_mtu active_mtu; 540 int gid_tbl_len; 541 u32 port_cap_flags; 542 u32 max_msg_sz; 543 u32 bad_pkey_cntr; 544 u32 qkey_viol_cntr; 545 u16 pkey_tbl_len; 546 u16 lid; 547 u16 sm_lid; 548 u8 lmc; 549 u8 max_vl_num; 550 u8 sm_sl; 551 u8 subnet_timeout; 552 u8 init_type_reply; 553 u8 active_width; 554 u8 active_speed; 555 u8 phys_state; 556 bool grh_required; 557 }; 558 559 enum ib_device_modify_flags { 560 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 561 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 562 }; 563 564 #define IB_DEVICE_NODE_DESC_MAX 64 565 566 struct ib_device_modify { 567 u64 sys_image_guid; 568 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 569 }; 570 571 enum ib_port_modify_flags { 572 IB_PORT_SHUTDOWN = 1, 573 IB_PORT_INIT_TYPE = (1<<2), 574 IB_PORT_RESET_QKEY_CNTR = (1<<3) 575 }; 576 577 struct ib_port_modify { 578 u32 set_port_cap_mask; 579 u32 clr_port_cap_mask; 580 u8 init_type; 581 }; 582 583 enum ib_event_type { 584 IB_EVENT_CQ_ERR, 585 IB_EVENT_QP_FATAL, 586 IB_EVENT_QP_REQ_ERR, 587 IB_EVENT_QP_ACCESS_ERR, 588 IB_EVENT_COMM_EST, 589 IB_EVENT_SQ_DRAINED, 590 IB_EVENT_PATH_MIG, 591 IB_EVENT_PATH_MIG_ERR, 592 IB_EVENT_DEVICE_FATAL, 593 IB_EVENT_PORT_ACTIVE, 594 IB_EVENT_PORT_ERR, 595 IB_EVENT_LID_CHANGE, 596 IB_EVENT_PKEY_CHANGE, 597 IB_EVENT_SM_CHANGE, 598 IB_EVENT_SRQ_ERR, 599 IB_EVENT_SRQ_LIMIT_REACHED, 600 IB_EVENT_QP_LAST_WQE_REACHED, 601 IB_EVENT_CLIENT_REREGISTER, 602 IB_EVENT_GID_CHANGE, 603 IB_EVENT_WQ_FATAL, 604 }; 605 606 const char *__attribute_const__ ib_event_msg(enum ib_event_type event); 607 608 struct ib_event { 609 struct ib_device *device; 610 union { 611 struct ib_cq *cq; 612 struct ib_qp *qp; 613 struct ib_srq *srq; 614 struct ib_wq *wq; 615 u8 port_num; 616 } element; 617 enum ib_event_type event; 618 }; 619 620 struct 
ib_event_handler { 621 struct ib_device *device; 622 void (*handler)(struct ib_event_handler *, struct ib_event *); 623 struct list_head list; 624 }; 625 626 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 627 do { \ 628 (_ptr)->device = _device; \ 629 (_ptr)->handler = _handler; \ 630 INIT_LIST_HEAD(&(_ptr)->list); \ 631 } while (0) 632 633 struct ib_global_route { 634 union ib_gid dgid; 635 u32 flow_label; 636 u8 sgid_index; 637 u8 hop_limit; 638 u8 traffic_class; 639 }; 640 641 struct ib_grh { 642 __be32 version_tclass_flow; 643 __be16 paylen; 644 u8 next_hdr; 645 u8 hop_limit; 646 union ib_gid sgid; 647 union ib_gid dgid; 648 }; 649 650 union rdma_network_hdr { 651 struct ib_grh ibgrh; 652 struct { 653 /* The IB spec states that if it's IPv4, the header 654 * is located in the last 20 bytes of the header. 655 */ 656 u8 reserved[20]; 657 struct iphdr roce4grh; 658 }; 659 }; 660 661 enum { 662 IB_MULTICAST_QPN = 0xffffff 663 }; 664 665 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 666 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) 667 668 enum ib_ah_flags { 669 IB_AH_GRH = 1 670 }; 671 672 enum ib_rate { 673 IB_RATE_PORT_CURRENT = 0, 674 IB_RATE_2_5_GBPS = 2, 675 IB_RATE_5_GBPS = 5, 676 IB_RATE_10_GBPS = 3, 677 IB_RATE_20_GBPS = 6, 678 IB_RATE_30_GBPS = 4, 679 IB_RATE_40_GBPS = 7, 680 IB_RATE_60_GBPS = 8, 681 IB_RATE_80_GBPS = 9, 682 IB_RATE_120_GBPS = 10, 683 IB_RATE_14_GBPS = 11, 684 IB_RATE_56_GBPS = 12, 685 IB_RATE_112_GBPS = 13, 686 IB_RATE_168_GBPS = 14, 687 IB_RATE_25_GBPS = 15, 688 IB_RATE_100_GBPS = 16, 689 IB_RATE_200_GBPS = 17, 690 IB_RATE_300_GBPS = 18 691 }; 692 693 /** 694 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 695 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 696 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 697 * @rate: rate to convert. 698 */ 699 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 700 701 /** 702 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 703 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 704 * @rate: rate to convert. 705 */ 706 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 707 708 709 /** 710 * enum ib_mr_type - memory region type 711 * @IB_MR_TYPE_MEM_REG: memory region that is used for 712 * normal registration 713 * @IB_MR_TYPE_SIGNATURE: memory region that is used for 714 * signature operations (data-integrity 715 * capable regions) 716 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to 717 * register any arbitrary sg lists (without 718 * the normal mr constraints - see 719 * ib_map_mr_sg) 720 */ 721 enum ib_mr_type { 722 IB_MR_TYPE_MEM_REG, 723 IB_MR_TYPE_SIGNATURE, 724 IB_MR_TYPE_SG_GAPS, 725 }; 726 727 /** 728 * Signature types 729 * IB_SIG_TYPE_NONE: Unprotected. 730 * IB_SIG_TYPE_T10_DIF: Type T10-DIF 731 */ 732 enum ib_signature_type { 733 IB_SIG_TYPE_NONE, 734 IB_SIG_TYPE_T10_DIF, 735 }; 736 737 /** 738 * Signature T10-DIF block-guard types 739 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. 740 * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 741 */ 742 enum ib_t10_dif_bg_type { 743 IB_T10DIF_CRC, 744 IB_T10DIF_CSUM 745 }; 746 747 /** 748 * struct ib_t10_dif_domain - Parameters specific for T10-DIF 749 * domain. 750 * @bg_type: T10-DIF block guard type (CRC|CSUM) 751 * @pi_interval: protection information interval. 752 * @bg: seed of guard computation. 753 * @app_tag: application tag of guard block 754 * @ref_tag: initial guard block reference tag. 
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
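 * For example, both IB_WC_RECV and IB_WC_RECV_RDMA_WITH_IMM below have
 * bit 7 set, so the test is non-zero exactly for receive completions.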
890 */ 891 IB_WC_RECV = 1 << 7, 892 IB_WC_RECV_RDMA_WITH_IMM 893 }; 894 895 enum ib_wc_flags { 896 IB_WC_GRH = 1, 897 IB_WC_WITH_IMM = (1<<1), 898 IB_WC_WITH_INVALIDATE = (1<<2), 899 IB_WC_IP_CSUM_OK = (1<<3), 900 IB_WC_WITH_SMAC = (1<<4), 901 IB_WC_WITH_VLAN = (1<<5), 902 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6), 903 }; 904 905 struct ib_wc { 906 union { 907 u64 wr_id; 908 struct ib_cqe *wr_cqe; 909 }; 910 enum ib_wc_status status; 911 enum ib_wc_opcode opcode; 912 u32 vendor_err; 913 u32 byte_len; 914 struct ib_qp *qp; 915 union { 916 __be32 imm_data; 917 u32 invalidate_rkey; 918 } ex; 919 u32 src_qp; 920 int wc_flags; 921 u16 pkey_index; 922 u16 slid; 923 u8 sl; 924 u8 dlid_path_bits; 925 u8 port_num; /* valid only for DR SMPs on switches */ 926 u8 smac[ETH_ALEN]; 927 u16 vlan_id; 928 u8 network_hdr_type; 929 }; 930 931 enum ib_cq_notify_flags { 932 IB_CQ_SOLICITED = 1 << 0, 933 IB_CQ_NEXT_COMP = 1 << 1, 934 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 935 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 936 }; 937 938 enum ib_srq_type { 939 IB_SRQT_BASIC, 940 IB_SRQT_XRC 941 }; 942 943 enum ib_srq_attr_mask { 944 IB_SRQ_MAX_WR = 1 << 0, 945 IB_SRQ_LIMIT = 1 << 1, 946 }; 947 948 struct ib_srq_attr { 949 u32 max_wr; 950 u32 max_sge; 951 u32 srq_limit; 952 }; 953 954 struct ib_srq_init_attr { 955 void (*event_handler)(struct ib_event *, void *); 956 void *srq_context; 957 struct ib_srq_attr attr; 958 enum ib_srq_type srq_type; 959 960 union { 961 struct { 962 struct ib_xrcd *xrcd; 963 struct ib_cq *cq; 964 } xrc; 965 } ext; 966 }; 967 968 struct ib_qp_cap { 969 u32 max_send_wr; 970 u32 max_recv_wr; 971 u32 max_send_sge; 972 u32 max_recv_sge; 973 u32 max_inline_data; 974 975 /* 976 * Maximum number of rdma_rw_ctx structures in flight at a time. 977 * ib_create_qp() will calculate the right amount of neededed WRs 978 * and MRs based on this. 979 */ 980 u32 max_rdma_ctxs; 981 }; 982 983 enum ib_sig_type { 984 IB_SIGNAL_ALL_WR, 985 IB_SIGNAL_REQ_WR 986 }; 987 988 enum ib_qp_type { 989 /* 990 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 991 * here (and in that order) since the MAD layer uses them as 992 * indices into a 2-entry table. 993 */ 994 IB_QPT_SMI, 995 IB_QPT_GSI, 996 997 IB_QPT_RC, 998 IB_QPT_UC, 999 IB_QPT_UD, 1000 IB_QPT_RAW_IPV6, 1001 IB_QPT_RAW_ETHERTYPE, 1002 IB_QPT_RAW_PACKET = 8, 1003 IB_QPT_XRC_INI = 9, 1004 IB_QPT_XRC_TGT, 1005 IB_QPT_MAX, 1006 /* Reserve a range for qp types internal to the low level driver. 
1007 * These qp types will not be visible at the IB core layer, so the 1008 * IB_QPT_MAX usages should not be affected in the core layer 1009 */ 1010 IB_QPT_RESERVED1 = 0x1000, 1011 IB_QPT_RESERVED2, 1012 IB_QPT_RESERVED3, 1013 IB_QPT_RESERVED4, 1014 IB_QPT_RESERVED5, 1015 IB_QPT_RESERVED6, 1016 IB_QPT_RESERVED7, 1017 IB_QPT_RESERVED8, 1018 IB_QPT_RESERVED9, 1019 IB_QPT_RESERVED10, 1020 }; 1021 1022 enum ib_qp_create_flags { 1023 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 1024 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 1025 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, 1026 IB_QP_CREATE_MANAGED_SEND = 1 << 3, 1027 IB_QP_CREATE_MANAGED_RECV = 1 << 4, 1028 IB_QP_CREATE_NETIF_QP = 1 << 5, 1029 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 1030 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, 1031 IB_QP_CREATE_SCATTER_FCS = 1 << 8, 1032 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, 1033 /* reserve bits 26-31 for low level drivers' internal use */ 1034 IB_QP_CREATE_RESERVED_START = 1 << 26, 1035 IB_QP_CREATE_RESERVED_END = 1 << 31, 1036 }; 1037 1038 /* 1039 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 1040 * callback to destroy the passed in QP. 1041 */ 1042 1043 struct ib_qp_init_attr { 1044 void (*event_handler)(struct ib_event *, void *); 1045 void *qp_context; 1046 struct ib_cq *send_cq; 1047 struct ib_cq *recv_cq; 1048 struct ib_srq *srq; 1049 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1050 struct ib_qp_cap cap; 1051 enum ib_sig_type sq_sig_type; 1052 enum ib_qp_type qp_type; 1053 enum ib_qp_create_flags create_flags; 1054 1055 /* 1056 * Only needed for special QP types, or when using the RW API. 1057 */ 1058 u8 port_num; 1059 struct ib_rwq_ind_table *rwq_ind_tbl; 1060 }; 1061 1062 struct ib_qp_open_attr { 1063 void (*event_handler)(struct ib_event *, void *); 1064 void *qp_context; 1065 u32 qp_num; 1066 enum ib_qp_type qp_type; 1067 }; 1068 1069 enum ib_rnr_timeout { 1070 IB_RNR_TIMER_655_36 = 0, 1071 IB_RNR_TIMER_000_01 = 1, 1072 IB_RNR_TIMER_000_02 = 2, 1073 IB_RNR_TIMER_000_03 = 3, 1074 IB_RNR_TIMER_000_04 = 4, 1075 IB_RNR_TIMER_000_06 = 5, 1076 IB_RNR_TIMER_000_08 = 6, 1077 IB_RNR_TIMER_000_12 = 7, 1078 IB_RNR_TIMER_000_16 = 8, 1079 IB_RNR_TIMER_000_24 = 9, 1080 IB_RNR_TIMER_000_32 = 10, 1081 IB_RNR_TIMER_000_48 = 11, 1082 IB_RNR_TIMER_000_64 = 12, 1083 IB_RNR_TIMER_000_96 = 13, 1084 IB_RNR_TIMER_001_28 = 14, 1085 IB_RNR_TIMER_001_92 = 15, 1086 IB_RNR_TIMER_002_56 = 16, 1087 IB_RNR_TIMER_003_84 = 17, 1088 IB_RNR_TIMER_005_12 = 18, 1089 IB_RNR_TIMER_007_68 = 19, 1090 IB_RNR_TIMER_010_24 = 20, 1091 IB_RNR_TIMER_015_36 = 21, 1092 IB_RNR_TIMER_020_48 = 22, 1093 IB_RNR_TIMER_030_72 = 23, 1094 IB_RNR_TIMER_040_96 = 24, 1095 IB_RNR_TIMER_061_44 = 25, 1096 IB_RNR_TIMER_081_92 = 26, 1097 IB_RNR_TIMER_122_88 = 27, 1098 IB_RNR_TIMER_163_84 = 28, 1099 IB_RNR_TIMER_245_76 = 29, 1100 IB_RNR_TIMER_327_68 = 30, 1101 IB_RNR_TIMER_491_52 = 31 1102 }; 1103 1104 enum ib_qp_attr_mask { 1105 IB_QP_STATE = 1, 1106 IB_QP_CUR_STATE = (1<<1), 1107 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 1108 IB_QP_ACCESS_FLAGS = (1<<3), 1109 IB_QP_PKEY_INDEX = (1<<4), 1110 IB_QP_PORT = (1<<5), 1111 IB_QP_QKEY = (1<<6), 1112 IB_QP_AV = (1<<7), 1113 IB_QP_PATH_MTU = (1<<8), 1114 IB_QP_TIMEOUT = (1<<9), 1115 IB_QP_RETRY_CNT = (1<<10), 1116 IB_QP_RNR_RETRY = (1<<11), 1117 IB_QP_RQ_PSN = (1<<12), 1118 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 1119 IB_QP_ALT_PATH = (1<<14), 1120 IB_QP_MIN_RNR_TIMER = (1<<15), 1121 IB_QP_SQ_PSN = (1<<16), 1122 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 1123 IB_QP_PATH_MIG_STATE = (1<<18), 1124 IB_QP_CAP = (1<<19), 1125 
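	/*
	 * Illustrative sketch (not part of the original header): a typical
	 * RESET -> INIT transition on an RC QP passes
	 *
	 *	struct ib_qp_attr attr = {
	 *		.qp_state	 = IB_QPS_INIT,
	 *		.pkey_index	 = 0,
	 *		.port_num	 = 1,
	 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	 *	};
	 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
	 *				IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	 *
	 * The exact mask depends on the QP type and transition; see
	 * ib_modify_qp_is_ok().
	 */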
IB_QP_DEST_QPN = (1<<20), 1126 IB_QP_RESERVED1 = (1<<21), 1127 IB_QP_RESERVED2 = (1<<22), 1128 IB_QP_RESERVED3 = (1<<23), 1129 IB_QP_RESERVED4 = (1<<24), 1130 IB_QP_RATE_LIMIT = (1<<25), 1131 }; 1132 1133 enum ib_qp_state { 1134 IB_QPS_RESET, 1135 IB_QPS_INIT, 1136 IB_QPS_RTR, 1137 IB_QPS_RTS, 1138 IB_QPS_SQD, 1139 IB_QPS_SQE, 1140 IB_QPS_ERR 1141 }; 1142 1143 enum ib_mig_state { 1144 IB_MIG_MIGRATED, 1145 IB_MIG_REARM, 1146 IB_MIG_ARMED 1147 }; 1148 1149 enum ib_mw_type { 1150 IB_MW_TYPE_1 = 1, 1151 IB_MW_TYPE_2 = 2 1152 }; 1153 1154 struct ib_qp_attr { 1155 enum ib_qp_state qp_state; 1156 enum ib_qp_state cur_qp_state; 1157 enum ib_mtu path_mtu; 1158 enum ib_mig_state path_mig_state; 1159 u32 qkey; 1160 u32 rq_psn; 1161 u32 sq_psn; 1162 u32 dest_qp_num; 1163 int qp_access_flags; 1164 struct ib_qp_cap cap; 1165 struct ib_ah_attr ah_attr; 1166 struct ib_ah_attr alt_ah_attr; 1167 u16 pkey_index; 1168 u16 alt_pkey_index; 1169 u8 en_sqd_async_notify; 1170 u8 sq_draining; 1171 u8 max_rd_atomic; 1172 u8 max_dest_rd_atomic; 1173 u8 min_rnr_timer; 1174 u8 port_num; 1175 u8 timeout; 1176 u8 retry_cnt; 1177 u8 rnr_retry; 1178 u8 alt_port_num; 1179 u8 alt_timeout; 1180 u32 rate_limit; 1181 }; 1182 1183 enum ib_wr_opcode { 1184 IB_WR_RDMA_WRITE, 1185 IB_WR_RDMA_WRITE_WITH_IMM, 1186 IB_WR_SEND, 1187 IB_WR_SEND_WITH_IMM, 1188 IB_WR_RDMA_READ, 1189 IB_WR_ATOMIC_CMP_AND_SWP, 1190 IB_WR_ATOMIC_FETCH_AND_ADD, 1191 IB_WR_LSO, 1192 IB_WR_SEND_WITH_INV, 1193 IB_WR_RDMA_READ_WITH_INV, 1194 IB_WR_LOCAL_INV, 1195 IB_WR_REG_MR, 1196 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1197 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1198 IB_WR_REG_SIG_MR, 1199 /* reserve values for low level drivers' internal use. 1200 * These values will not be used at all in the ib core layer. 1201 */ 1202 IB_WR_RESERVED1 = 0xf0, 1203 IB_WR_RESERVED2, 1204 IB_WR_RESERVED3, 1205 IB_WR_RESERVED4, 1206 IB_WR_RESERVED5, 1207 IB_WR_RESERVED6, 1208 IB_WR_RESERVED7, 1209 IB_WR_RESERVED8, 1210 IB_WR_RESERVED9, 1211 IB_WR_RESERVED10, 1212 }; 1213 1214 enum ib_send_flags { 1215 IB_SEND_FENCE = 1, 1216 IB_SEND_SIGNALED = (1<<1), 1217 IB_SEND_SOLICITED = (1<<2), 1218 IB_SEND_INLINE = (1<<3), 1219 IB_SEND_IP_CSUM = (1<<4), 1220 1221 /* reserve bits 26-31 for low level drivers' internal use */ 1222 IB_SEND_RESERVED_START = (1 << 26), 1223 IB_SEND_RESERVED_END = (1 << 31), 1224 }; 1225 1226 struct ib_sge { 1227 u64 addr; 1228 u32 length; 1229 u32 lkey; 1230 }; 1231 1232 struct ib_cqe { 1233 void (*done)(struct ib_cq *cq, struct ib_wc *wc); 1234 }; 1235 1236 struct ib_send_wr { 1237 struct ib_send_wr *next; 1238 union { 1239 u64 wr_id; 1240 struct ib_cqe *wr_cqe; 1241 }; 1242 struct ib_sge *sg_list; 1243 int num_sge; 1244 enum ib_wr_opcode opcode; 1245 int send_flags; 1246 union { 1247 __be32 imm_data; 1248 u32 invalidate_rkey; 1249 } ex; 1250 }; 1251 1252 struct ib_rdma_wr { 1253 struct ib_send_wr wr; 1254 u64 remote_addr; 1255 u32 rkey; 1256 }; 1257 1258 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) 1259 { 1260 return container_of(wr, struct ib_rdma_wr, wr); 1261 } 1262 1263 struct ib_atomic_wr { 1264 struct ib_send_wr wr; 1265 u64 remote_addr; 1266 u64 compare_add; 1267 u64 swap; 1268 u64 compare_add_mask; 1269 u64 swap_mask; 1270 u32 rkey; 1271 }; 1272 1273 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) 1274 { 1275 return container_of(wr, struct ib_atomic_wr, wr); 1276 } 1277 1278 struct ib_ud_wr { 1279 struct ib_send_wr wr; 1280 struct ib_ah *ah; 1281 void *header; 1282 int hlen; 1283 int mss; 1284 u32 remote_qpn; 1285 u32 
remote_qkey; 1286 u16 pkey_index; /* valid for GSI only */ 1287 u8 port_num; /* valid for DR SMPs on switch only */ 1288 }; 1289 1290 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) 1291 { 1292 return container_of(wr, struct ib_ud_wr, wr); 1293 } 1294 1295 struct ib_reg_wr { 1296 struct ib_send_wr wr; 1297 struct ib_mr *mr; 1298 u32 key; 1299 int access; 1300 }; 1301 1302 static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr) 1303 { 1304 return container_of(wr, struct ib_reg_wr, wr); 1305 } 1306 1307 struct ib_sig_handover_wr { 1308 struct ib_send_wr wr; 1309 struct ib_sig_attrs *sig_attrs; 1310 struct ib_mr *sig_mr; 1311 int access_flags; 1312 struct ib_sge *prot; 1313 }; 1314 1315 static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) 1316 { 1317 return container_of(wr, struct ib_sig_handover_wr, wr); 1318 } 1319 1320 struct ib_recv_wr { 1321 struct ib_recv_wr *next; 1322 union { 1323 u64 wr_id; 1324 struct ib_cqe *wr_cqe; 1325 }; 1326 struct ib_sge *sg_list; 1327 int num_sge; 1328 }; 1329 1330 enum ib_access_flags { 1331 IB_ACCESS_LOCAL_WRITE = 1, 1332 IB_ACCESS_REMOTE_WRITE = (1<<1), 1333 IB_ACCESS_REMOTE_READ = (1<<2), 1334 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 1335 IB_ACCESS_MW_BIND = (1<<4), 1336 IB_ZERO_BASED = (1<<5), 1337 IB_ACCESS_ON_DEMAND = (1<<6), 1338 }; 1339 1340 /* 1341 * XXX: these are apparently used for ->rereg_user_mr, no idea why they 1342 * are hidden here instead of a uapi header! 1343 */ 1344 enum ib_mr_rereg_flags { 1345 IB_MR_REREG_TRANS = 1, 1346 IB_MR_REREG_PD = (1<<1), 1347 IB_MR_REREG_ACCESS = (1<<2), 1348 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1349 }; 1350 1351 struct ib_fmr_attr { 1352 int max_pages; 1353 int max_maps; 1354 u8 page_shift; 1355 }; 1356 1357 struct ib_umem; 1358 1359 struct ib_ucontext { 1360 struct ib_device *device; 1361 struct list_head pd_list; 1362 struct list_head mr_list; 1363 struct list_head mw_list; 1364 struct list_head cq_list; 1365 struct list_head qp_list; 1366 struct list_head srq_list; 1367 struct list_head ah_list; 1368 struct list_head xrcd_list; 1369 struct list_head rule_list; 1370 struct list_head wq_list; 1371 struct list_head rwq_ind_tbl_list; 1372 int closing; 1373 1374 struct pid *tgid; 1375 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1376 struct rb_root umem_tree; 1377 /* 1378 * Protects .umem_rbroot and tree, as well as odp_mrs_count and 1379 * mmu notifiers registration. 1380 */ 1381 struct rw_semaphore umem_rwsem; 1382 void (*invalidate_range)(struct ib_umem *umem, 1383 unsigned long start, unsigned long end); 1384 1385 struct mmu_notifier mn; 1386 atomic_t notifier_count; 1387 /* A list of umems that don't have private mmu notifier counters yet. 
*/ 1388 struct list_head no_private_counters; 1389 int odp_mrs_count; 1390 #endif 1391 }; 1392 1393 struct ib_uobject { 1394 u64 user_handle; /* handle given to us by userspace */ 1395 struct ib_ucontext *context; /* associated user context */ 1396 void *object; /* containing object */ 1397 struct list_head list; /* link to context's list */ 1398 int id; /* index into kernel idr */ 1399 struct kref ref; 1400 struct rw_semaphore mutex; /* protects .live */ 1401 struct rcu_head rcu; /* kfree_rcu() overhead */ 1402 int live; 1403 }; 1404 1405 struct ib_udata { 1406 const void __user *inbuf; 1407 void __user *outbuf; 1408 size_t inlen; 1409 size_t outlen; 1410 }; 1411 1412 struct ib_pd { 1413 u32 local_dma_lkey; 1414 u32 flags; 1415 struct ib_device *device; 1416 struct ib_uobject *uobject; 1417 atomic_t usecnt; /* count all resources */ 1418 1419 u32 unsafe_global_rkey; 1420 1421 /* 1422 * Implementation details of the RDMA core, don't use in drivers: 1423 */ 1424 struct ib_mr *__internal_mr; 1425 }; 1426 1427 struct ib_xrcd { 1428 struct ib_device *device; 1429 atomic_t usecnt; /* count all exposed resources */ 1430 struct inode *inode; 1431 1432 struct mutex tgt_qp_mutex; 1433 struct list_head tgt_qp_list; 1434 }; 1435 1436 struct ib_ah { 1437 struct ib_device *device; 1438 struct ib_pd *pd; 1439 struct ib_uobject *uobject; 1440 }; 1441 1442 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1443 1444 enum ib_poll_context { 1445 IB_POLL_DIRECT, /* caller context, no hw completions */ 1446 IB_POLL_SOFTIRQ, /* poll from softirq context */ 1447 IB_POLL_WORKQUEUE, /* poll from workqueue */ 1448 }; 1449 1450 struct ib_cq { 1451 struct ib_device *device; 1452 struct ib_uobject *uobject; 1453 ib_comp_handler comp_handler; 1454 void (*event_handler)(struct ib_event *, void *); 1455 void *cq_context; 1456 int cqe; 1457 atomic_t usecnt; /* count number of work queues */ 1458 enum ib_poll_context poll_ctx; 1459 struct ib_wc *wc; 1460 union { 1461 struct irq_poll iop; 1462 struct work_struct work; 1463 }; 1464 }; 1465 1466 struct ib_srq { 1467 struct ib_device *device; 1468 struct ib_pd *pd; 1469 struct ib_uobject *uobject; 1470 void (*event_handler)(struct ib_event *, void *); 1471 void *srq_context; 1472 enum ib_srq_type srq_type; 1473 atomic_t usecnt; 1474 1475 union { 1476 struct { 1477 struct ib_xrcd *xrcd; 1478 struct ib_cq *cq; 1479 u32 srq_num; 1480 } xrc; 1481 } ext; 1482 }; 1483 1484 enum ib_raw_packet_caps { 1485 /* Strip cvlan from incoming packet and report it in the matching work 1486 * completion is supported. 1487 */ 1488 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0), 1489 /* Scatter FCS field of an incoming packet to host memory is supported. 1490 */ 1491 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1), 1492 /* Checksum offloads are supported (for both send and receive). 
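	 * This supersedes the deprecated IB_DEVICE_RAW_IP_CSUM device
	 * capability flag.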
*/ 1493 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2), 1494 }; 1495 1496 enum ib_wq_type { 1497 IB_WQT_RQ 1498 }; 1499 1500 enum ib_wq_state { 1501 IB_WQS_RESET, 1502 IB_WQS_RDY, 1503 IB_WQS_ERR 1504 }; 1505 1506 struct ib_wq { 1507 struct ib_device *device; 1508 struct ib_uobject *uobject; 1509 void *wq_context; 1510 void (*event_handler)(struct ib_event *, void *); 1511 struct ib_pd *pd; 1512 struct ib_cq *cq; 1513 u32 wq_num; 1514 enum ib_wq_state state; 1515 enum ib_wq_type wq_type; 1516 atomic_t usecnt; 1517 }; 1518 1519 enum ib_wq_flags { 1520 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, 1521 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, 1522 }; 1523 1524 struct ib_wq_init_attr { 1525 void *wq_context; 1526 enum ib_wq_type wq_type; 1527 u32 max_wr; 1528 u32 max_sge; 1529 struct ib_cq *cq; 1530 void (*event_handler)(struct ib_event *, void *); 1531 u32 create_flags; /* Use enum ib_wq_flags */ 1532 }; 1533 1534 enum ib_wq_attr_mask { 1535 IB_WQ_STATE = 1 << 0, 1536 IB_WQ_CUR_STATE = 1 << 1, 1537 IB_WQ_FLAGS = 1 << 2, 1538 }; 1539 1540 struct ib_wq_attr { 1541 enum ib_wq_state wq_state; 1542 enum ib_wq_state curr_wq_state; 1543 u32 flags; /* Use enum ib_wq_flags */ 1544 u32 flags_mask; /* Use enum ib_wq_flags */ 1545 }; 1546 1547 struct ib_rwq_ind_table { 1548 struct ib_device *device; 1549 struct ib_uobject *uobject; 1550 atomic_t usecnt; 1551 u32 ind_tbl_num; 1552 u32 log_ind_tbl_size; 1553 struct ib_wq **ind_tbl; 1554 }; 1555 1556 struct ib_rwq_ind_table_init_attr { 1557 u32 log_ind_tbl_size; 1558 /* Each entry is a pointer to Receive Work Queue */ 1559 struct ib_wq **ind_tbl; 1560 }; 1561 1562 /* 1563 * @max_write_sge: Maximum SGE elements per RDMA WRITE request. 1564 * @max_read_sge: Maximum SGE elements per RDMA READ request. 1565 */ 1566 struct ib_qp { 1567 struct ib_device *device; 1568 struct ib_pd *pd; 1569 struct ib_cq *send_cq; 1570 struct ib_cq *recv_cq; 1571 spinlock_t mr_lock; 1572 int mrs_used; 1573 struct list_head rdma_mrs; 1574 struct list_head sig_mrs; 1575 struct ib_srq *srq; 1576 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1577 struct list_head xrcd_list; 1578 1579 /* count times opened, mcast attaches, flow attaches */ 1580 atomic_t usecnt; 1581 struct list_head open_list; 1582 struct ib_qp *real_qp; 1583 struct ib_uobject *uobject; 1584 void (*event_handler)(struct ib_event *, void *); 1585 void *qp_context; 1586 u32 qp_num; 1587 u32 max_write_sge; 1588 u32 max_read_sge; 1589 enum ib_qp_type qp_type; 1590 struct ib_rwq_ind_table *rwq_ind_tbl; 1591 }; 1592 1593 struct ib_mr { 1594 struct ib_device *device; 1595 struct ib_pd *pd; 1596 u32 lkey; 1597 u32 rkey; 1598 u64 iova; 1599 u32 length; 1600 unsigned int page_size; 1601 bool need_inval; 1602 union { 1603 struct ib_uobject *uobject; /* user */ 1604 struct list_head qp_entry; /* FR */ 1605 }; 1606 }; 1607 1608 struct ib_mw { 1609 struct ib_device *device; 1610 struct ib_pd *pd; 1611 struct ib_uobject *uobject; 1612 u32 rkey; 1613 enum ib_mw_type type; 1614 }; 1615 1616 struct ib_fmr { 1617 struct ib_device *device; 1618 struct ib_pd *pd; 1619 struct list_head list; 1620 u32 lkey; 1621 u32 rkey; 1622 }; 1623 1624 /* Supported steering options */ 1625 enum ib_flow_attr_type { 1626 /* steering according to rule specifications */ 1627 IB_FLOW_ATTR_NORMAL = 0x0, 1628 /* default unicast and multicast rule - 1629 * receive all Eth traffic which isn't steered to any QP 1630 */ 1631 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1632 /* default multicast rule - 1633 * receive all Eth multicast traffic which isn't steered to any QP 1634 */ 1635 
IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1636 /* sniffer rule - receive all port traffic */ 1637 IB_FLOW_ATTR_SNIFFER = 0x3 1638 }; 1639 1640 /* Supported steering header types */ 1641 enum ib_flow_spec_type { 1642 /* L2 headers*/ 1643 IB_FLOW_SPEC_ETH = 0x20, 1644 IB_FLOW_SPEC_IB = 0x22, 1645 /* L3 header*/ 1646 IB_FLOW_SPEC_IPV4 = 0x30, 1647 IB_FLOW_SPEC_IPV6 = 0x31, 1648 /* L4 headers*/ 1649 IB_FLOW_SPEC_TCP = 0x40, 1650 IB_FLOW_SPEC_UDP = 0x41, 1651 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, 1652 IB_FLOW_SPEC_INNER = 0x100, 1653 /* Actions */ 1654 IB_FLOW_SPEC_ACTION_TAG = 0x1000, 1655 }; 1656 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1657 #define IB_FLOW_SPEC_SUPPORT_LAYERS 8 1658 1659 /* Flow steering rule priority is set according to it's domain. 1660 * Lower domain value means higher priority. 1661 */ 1662 enum ib_flow_domain { 1663 IB_FLOW_DOMAIN_USER, 1664 IB_FLOW_DOMAIN_ETHTOOL, 1665 IB_FLOW_DOMAIN_RFS, 1666 IB_FLOW_DOMAIN_NIC, 1667 IB_FLOW_DOMAIN_NUM /* Must be last */ 1668 }; 1669 1670 enum ib_flow_flags { 1671 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ 1672 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ 1673 }; 1674 1675 struct ib_flow_eth_filter { 1676 u8 dst_mac[6]; 1677 u8 src_mac[6]; 1678 __be16 ether_type; 1679 __be16 vlan_tag; 1680 /* Must be last */ 1681 u8 real_sz[0]; 1682 }; 1683 1684 struct ib_flow_spec_eth { 1685 u32 type; 1686 u16 size; 1687 struct ib_flow_eth_filter val; 1688 struct ib_flow_eth_filter mask; 1689 }; 1690 1691 struct ib_flow_ib_filter { 1692 __be16 dlid; 1693 __u8 sl; 1694 /* Must be last */ 1695 u8 real_sz[0]; 1696 }; 1697 1698 struct ib_flow_spec_ib { 1699 u32 type; 1700 u16 size; 1701 struct ib_flow_ib_filter val; 1702 struct ib_flow_ib_filter mask; 1703 }; 1704 1705 /* IPv4 header flags */ 1706 enum ib_ipv4_flags { 1707 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */ 1708 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the 1709 last have this flag set */ 1710 }; 1711 1712 struct ib_flow_ipv4_filter { 1713 __be32 src_ip; 1714 __be32 dst_ip; 1715 u8 proto; 1716 u8 tos; 1717 u8 ttl; 1718 u8 flags; 1719 /* Must be last */ 1720 u8 real_sz[0]; 1721 }; 1722 1723 struct ib_flow_spec_ipv4 { 1724 u32 type; 1725 u16 size; 1726 struct ib_flow_ipv4_filter val; 1727 struct ib_flow_ipv4_filter mask; 1728 }; 1729 1730 struct ib_flow_ipv6_filter { 1731 u8 src_ip[16]; 1732 u8 dst_ip[16]; 1733 __be32 flow_label; 1734 u8 next_hdr; 1735 u8 traffic_class; 1736 u8 hop_limit; 1737 /* Must be last */ 1738 u8 real_sz[0]; 1739 }; 1740 1741 struct ib_flow_spec_ipv6 { 1742 u32 type; 1743 u16 size; 1744 struct ib_flow_ipv6_filter val; 1745 struct ib_flow_ipv6_filter mask; 1746 }; 1747 1748 struct ib_flow_tcp_udp_filter { 1749 __be16 dst_port; 1750 __be16 src_port; 1751 /* Must be last */ 1752 u8 real_sz[0]; 1753 }; 1754 1755 struct ib_flow_spec_tcp_udp { 1756 u32 type; 1757 u16 size; 1758 struct ib_flow_tcp_udp_filter val; 1759 struct ib_flow_tcp_udp_filter mask; 1760 }; 1761 1762 struct ib_flow_tunnel_filter { 1763 __be32 tunnel_id; 1764 u8 real_sz[0]; 1765 }; 1766 1767 /* ib_flow_spec_tunnel describes the Vxlan tunnel 1768 * the tunnel_id from val has the vni value 1769 */ 1770 struct ib_flow_spec_tunnel { 1771 u32 type; 1772 u16 size; 1773 struct ib_flow_tunnel_filter val; 1774 struct ib_flow_tunnel_filter mask; 1775 }; 1776 1777 struct ib_flow_spec_action_tag { 1778 enum ib_flow_spec_type type; 1779 u16 size; 1780 u32 tag_id; 1781 }; 1782 1783 union ib_flow_spec { 1784 struct { 1785 u32 type; 1786 u16 size; 1787 }; 1788 
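	/*
	 * The anonymous type/size pair above overlays the leading members of
	 * every spec in this union, so callers can walk a buffer of
	 * variable-sized specs by reading ->type and advancing by ->size
	 * without knowing the concrete spec struct.
	 */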
struct ib_flow_spec_eth eth; 1789 struct ib_flow_spec_ib ib; 1790 struct ib_flow_spec_ipv4 ipv4; 1791 struct ib_flow_spec_tcp_udp tcp_udp; 1792 struct ib_flow_spec_ipv6 ipv6; 1793 struct ib_flow_spec_tunnel tunnel; 1794 struct ib_flow_spec_action_tag flow_tag; 1795 }; 1796 1797 struct ib_flow_attr { 1798 enum ib_flow_attr_type type; 1799 u16 size; 1800 u16 priority; 1801 u32 flags; 1802 u8 num_of_specs; 1803 u8 port; 1804 /* Following are the optional layers according to user request 1805 * struct ib_flow_spec_xxx 1806 * struct ib_flow_spec_yyy 1807 */ 1808 }; 1809 1810 struct ib_flow { 1811 struct ib_qp *qp; 1812 struct ib_uobject *uobject; 1813 }; 1814 1815 struct ib_mad_hdr; 1816 struct ib_grh; 1817 1818 enum ib_process_mad_flags { 1819 IB_MAD_IGNORE_MKEY = 1, 1820 IB_MAD_IGNORE_BKEY = 2, 1821 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 1822 }; 1823 1824 enum ib_mad_result { 1825 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 1826 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 1827 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 1828 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 1829 }; 1830 1831 #define IB_DEVICE_NAME_MAX 64 1832 1833 struct ib_port_cache { 1834 struct ib_pkey_cache *pkey; 1835 struct ib_gid_table *gid; 1836 u8 lmc; 1837 enum ib_port_state port_state; 1838 }; 1839 1840 struct ib_cache { 1841 rwlock_t lock; 1842 struct ib_event_handler event_handler; 1843 struct ib_port_cache *ports; 1844 }; 1845 1846 struct ib_dma_mapping_ops { 1847 int (*mapping_error)(struct ib_device *dev, 1848 u64 dma_addr); 1849 u64 (*map_single)(struct ib_device *dev, 1850 void *ptr, size_t size, 1851 enum dma_data_direction direction); 1852 void (*unmap_single)(struct ib_device *dev, 1853 u64 addr, size_t size, 1854 enum dma_data_direction direction); 1855 u64 (*map_page)(struct ib_device *dev, 1856 struct page *page, unsigned long offset, 1857 size_t size, 1858 enum dma_data_direction direction); 1859 void (*unmap_page)(struct ib_device *dev, 1860 u64 addr, size_t size, 1861 enum dma_data_direction direction); 1862 int (*map_sg)(struct ib_device *dev, 1863 struct scatterlist *sg, int nents, 1864 enum dma_data_direction direction); 1865 void (*unmap_sg)(struct ib_device *dev, 1866 struct scatterlist *sg, int nents, 1867 enum dma_data_direction direction); 1868 int (*map_sg_attrs)(struct ib_device *dev, 1869 struct scatterlist *sg, int nents, 1870 enum dma_data_direction direction, 1871 unsigned long attrs); 1872 void (*unmap_sg_attrs)(struct ib_device *dev, 1873 struct scatterlist *sg, int nents, 1874 enum dma_data_direction direction, 1875 unsigned long attrs); 1876 void (*sync_single_for_cpu)(struct ib_device *dev, 1877 u64 dma_handle, 1878 size_t size, 1879 enum dma_data_direction dir); 1880 void (*sync_single_for_device)(struct ib_device *dev, 1881 u64 dma_handle, 1882 size_t size, 1883 enum dma_data_direction dir); 1884 void *(*alloc_coherent)(struct ib_device *dev, 1885 size_t size, 1886 u64 *dma_handle, 1887 gfp_t flag); 1888 void (*free_coherent)(struct ib_device *dev, 1889 size_t size, void *cpu_addr, 1890 u64 dma_handle); 1891 }; 1892 1893 struct iw_cm_verbs; 1894 1895 struct ib_port_immutable { 1896 int pkey_tbl_len; 1897 int gid_tbl_len; 1898 u32 core_cap_flags; 1899 u32 max_mad_size; 1900 }; 1901 1902 struct ib_device { 1903 struct device *dma_device; 1904 1905 char name[IB_DEVICE_NAME_MAX]; 1906 1907 struct list_head event_handler_list; 1908 spinlock_t event_handler_lock; 1909 1910 
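	/* event_handler_list above is protected by event_handler_lock */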
spinlock_t client_data_lock; 1911 struct list_head core_list; 1912 /* Access to the client_data_list is protected by the client_data_lock 1913 * spinlock and the lists_rwsem read-write semaphore */ 1914 struct list_head client_data_list; 1915 1916 struct ib_cache cache; 1917 /** 1918 * port_immutable is indexed by port number 1919 */ 1920 struct ib_port_immutable *port_immutable; 1921 1922 int num_comp_vectors; 1923 1924 struct iw_cm_verbs *iwcm; 1925 1926 /** 1927 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 1928 * driver initialized data. The struct is kfree()'ed by the sysfs 1929 * core when the device is removed. A lifespan of -1 in the return 1930 * struct tells the core to set a default lifespan. 1931 */ 1932 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 1933 u8 port_num); 1934 /** 1935 * get_hw_stats - Fill in the counter value(s) in the stats struct. 1936 * @index - The index in the value array we wish to have updated, or 1937 * num_counters if we want all stats updated 1938 * Return codes - 1939 * < 0 - Error, no counters updated 1940 * index - Updated the single counter pointed to by index 1941 * num_counters - Updated all counters (will reset the timestamp 1942 * and prevent further calls for lifespan milliseconds) 1943 * Drivers are allowed to update all counters in leiu of just the 1944 * one given in index at their option 1945 */ 1946 int (*get_hw_stats)(struct ib_device *device, 1947 struct rdma_hw_stats *stats, 1948 u8 port, int index); 1949 int (*query_device)(struct ib_device *device, 1950 struct ib_device_attr *device_attr, 1951 struct ib_udata *udata); 1952 int (*query_port)(struct ib_device *device, 1953 u8 port_num, 1954 struct ib_port_attr *port_attr); 1955 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 1956 u8 port_num); 1957 /* When calling get_netdev, the HW vendor's driver should return the 1958 * net device of device @device at port @port_num or NULL if such 1959 * a net device doesn't exist. The vendor driver should call dev_hold 1960 * on this net device. The HW vendor's device driver must guarantee 1961 * that this function returns NULL before the net device reaches 1962 * NETDEV_UNREGISTER_FINAL state. 1963 */ 1964 struct net_device *(*get_netdev)(struct ib_device *device, 1965 u8 port_num); 1966 int (*query_gid)(struct ib_device *device, 1967 u8 port_num, int index, 1968 union ib_gid *gid); 1969 /* When calling add_gid, the HW vendor's driver should 1970 * add the gid of device @device at gid index @index of 1971 * port @port_num to be @gid. Meta-info of that gid (for example, 1972 * the network device related to this gid is available 1973 * at @attr. @context allows the HW vendor driver to store extra 1974 * information together with a GID entry. The HW vendor may allocate 1975 * memory to contain this information and store it in @context when a 1976 * new GID entry is written to. Params are consistent until the next 1977 * call of add_gid or delete_gid. The function should return 0 on 1978 * success or error otherwise. The function could be called 1979 * concurrently for different ports. This function is only called 1980 * when roce_gid_table is used. 1981 */ 1982 int (*add_gid)(struct ib_device *device, 1983 u8 port_num, 1984 unsigned int index, 1985 const union ib_gid *gid, 1986 const struct ib_gid_attr *attr, 1987 void **context); 1988 /* When calling del_gid, the HW vendor's driver should delete the 1989 * gid of device @device at gid index @index of port @port_num. 
1990 * Upon the deletion of a GID entry, the HW vendor must free any 1991 * allocated memory. The caller will clear @context afterwards. 1992 * This function is only called when roce_gid_table is used. 1993 */ 1994 int (*del_gid)(struct ib_device *device, 1995 u8 port_num, 1996 unsigned int index, 1997 void **context); 1998 int (*query_pkey)(struct ib_device *device, 1999 u8 port_num, u16 index, u16 *pkey); 2000 int (*modify_device)(struct ib_device *device, 2001 int device_modify_mask, 2002 struct ib_device_modify *device_modify); 2003 int (*modify_port)(struct ib_device *device, 2004 u8 port_num, int port_modify_mask, 2005 struct ib_port_modify *port_modify); 2006 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 2007 struct ib_udata *udata); 2008 int (*dealloc_ucontext)(struct ib_ucontext *context); 2009 int (*mmap)(struct ib_ucontext *context, 2010 struct vm_area_struct *vma); 2011 struct ib_pd * (*alloc_pd)(struct ib_device *device, 2012 struct ib_ucontext *context, 2013 struct ib_udata *udata); 2014 int (*dealloc_pd)(struct ib_pd *pd); 2015 struct ib_ah * (*create_ah)(struct ib_pd *pd, 2016 struct ib_ah_attr *ah_attr, 2017 struct ib_udata *udata); 2018 int (*modify_ah)(struct ib_ah *ah, 2019 struct ib_ah_attr *ah_attr); 2020 int (*query_ah)(struct ib_ah *ah, 2021 struct ib_ah_attr *ah_attr); 2022 int (*destroy_ah)(struct ib_ah *ah); 2023 struct ib_srq * (*create_srq)(struct ib_pd *pd, 2024 struct ib_srq_init_attr *srq_init_attr, 2025 struct ib_udata *udata); 2026 int (*modify_srq)(struct ib_srq *srq, 2027 struct ib_srq_attr *srq_attr, 2028 enum ib_srq_attr_mask srq_attr_mask, 2029 struct ib_udata *udata); 2030 int (*query_srq)(struct ib_srq *srq, 2031 struct ib_srq_attr *srq_attr); 2032 int (*destroy_srq)(struct ib_srq *srq); 2033 int (*post_srq_recv)(struct ib_srq *srq, 2034 struct ib_recv_wr *recv_wr, 2035 struct ib_recv_wr **bad_recv_wr); 2036 struct ib_qp * (*create_qp)(struct ib_pd *pd, 2037 struct ib_qp_init_attr *qp_init_attr, 2038 struct ib_udata *udata); 2039 int (*modify_qp)(struct ib_qp *qp, 2040 struct ib_qp_attr *qp_attr, 2041 int qp_attr_mask, 2042 struct ib_udata *udata); 2043 int (*query_qp)(struct ib_qp *qp, 2044 struct ib_qp_attr *qp_attr, 2045 int qp_attr_mask, 2046 struct ib_qp_init_attr *qp_init_attr); 2047 int (*destroy_qp)(struct ib_qp *qp); 2048 int (*post_send)(struct ib_qp *qp, 2049 struct ib_send_wr *send_wr, 2050 struct ib_send_wr **bad_send_wr); 2051 int (*post_recv)(struct ib_qp *qp, 2052 struct ib_recv_wr *recv_wr, 2053 struct ib_recv_wr **bad_recv_wr); 2054 struct ib_cq * (*create_cq)(struct ib_device *device, 2055 const struct ib_cq_init_attr *attr, 2056 struct ib_ucontext *context, 2057 struct ib_udata *udata); 2058 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 2059 u16 cq_period); 2060 int (*destroy_cq)(struct ib_cq *cq); 2061 int (*resize_cq)(struct ib_cq *cq, int cqe, 2062 struct ib_udata *udata); 2063 int (*poll_cq)(struct ib_cq *cq, int num_entries, 2064 struct ib_wc *wc); 2065 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 2066 int (*req_notify_cq)(struct ib_cq *cq, 2067 enum ib_cq_notify_flags flags); 2068 int (*req_ncomp_notif)(struct ib_cq *cq, 2069 int wc_cnt); 2070 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 2071 int mr_access_flags); 2072 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 2073 u64 start, u64 length, 2074 u64 virt_addr, 2075 int mr_access_flags, 2076 struct ib_udata *udata); 2077 int (*rereg_user_mr)(struct ib_mr *mr, 2078 int flags, 2079 u64 start, u64 length, 2080 u64 virt_addr, 2081 int mr_access_flags, 
2082 struct ib_pd *pd, 2083 struct ib_udata *udata); 2084 int (*dereg_mr)(struct ib_mr *mr); 2085 struct ib_mr * (*alloc_mr)(struct ib_pd *pd, 2086 enum ib_mr_type mr_type, 2087 u32 max_num_sg); 2088 int (*map_mr_sg)(struct ib_mr *mr, 2089 struct scatterlist *sg, 2090 int sg_nents, 2091 unsigned int *sg_offset); 2092 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 2093 enum ib_mw_type type, 2094 struct ib_udata *udata); 2095 int (*dealloc_mw)(struct ib_mw *mw); 2096 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 2097 int mr_access_flags, 2098 struct ib_fmr_attr *fmr_attr); 2099 int (*map_phys_fmr)(struct ib_fmr *fmr, 2100 u64 *page_list, int list_len, 2101 u64 iova); 2102 int (*unmap_fmr)(struct list_head *fmr_list); 2103 int (*dealloc_fmr)(struct ib_fmr *fmr); 2104 int (*attach_mcast)(struct ib_qp *qp, 2105 union ib_gid *gid, 2106 u16 lid); 2107 int (*detach_mcast)(struct ib_qp *qp, 2108 union ib_gid *gid, 2109 u16 lid); 2110 int (*process_mad)(struct ib_device *device, 2111 int process_mad_flags, 2112 u8 port_num, 2113 const struct ib_wc *in_wc, 2114 const struct ib_grh *in_grh, 2115 const struct ib_mad_hdr *in_mad, 2116 size_t in_mad_size, 2117 struct ib_mad_hdr *out_mad, 2118 size_t *out_mad_size, 2119 u16 *out_mad_pkey_index); 2120 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 2121 struct ib_ucontext *ucontext, 2122 struct ib_udata *udata); 2123 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 2124 struct ib_flow * (*create_flow)(struct ib_qp *qp, 2125 struct ib_flow_attr 2126 *flow_attr, 2127 int domain); 2128 int (*destroy_flow)(struct ib_flow *flow_id); 2129 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2130 struct ib_mr_status *mr_status); 2131 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2132 void (*drain_rq)(struct ib_qp *qp); 2133 void (*drain_sq)(struct ib_qp *qp); 2134 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2135 int state); 2136 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2137 struct ifla_vf_info *ivf); 2138 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2139 struct ifla_vf_stats *stats); 2140 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2141 int type); 2142 struct ib_wq * (*create_wq)(struct ib_pd *pd, 2143 struct ib_wq_init_attr *init_attr, 2144 struct ib_udata *udata); 2145 int (*destroy_wq)(struct ib_wq *wq); 2146 int (*modify_wq)(struct ib_wq *wq, 2147 struct ib_wq_attr *attr, 2148 u32 wq_attr_mask, 2149 struct ib_udata *udata); 2150 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, 2151 struct ib_rwq_ind_table_init_attr *init_attr, 2152 struct ib_udata *udata); 2153 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2154 struct ib_dma_mapping_ops *dma_ops; 2155 2156 struct module *owner; 2157 struct device dev; 2158 struct kobject *ports_parent; 2159 struct list_head port_list; 2160 2161 enum { 2162 IB_DEV_UNINITIALIZED, 2163 IB_DEV_REGISTERED, 2164 IB_DEV_UNREGISTERED 2165 } reg_state; 2166 2167 int uverbs_abi_ver; 2168 u64 uverbs_cmd_mask; 2169 u64 uverbs_ex_cmd_mask; 2170 2171 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2172 __be64 node_guid; 2173 u32 local_dma_lkey; 2174 u16 is_switch:1; 2175 u8 node_type; 2176 u8 phys_port_cnt; 2177 struct ib_device_attr attrs; 2178 struct attribute_group *hw_stats_ag; 2179 struct rdma_hw_stats *hw_stats; 2180 2181 /** 2182 * The following mandatory functions are used only at device 2183 * registration. 
Keep functions such as these at the end of this 2184 * structure to avoid cache line misses when accessing struct ib_device 2185 * in fast paths. 2186 */ 2187 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 2188 void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); 2189 }; 2190 2191 struct ib_client { 2192 char *name; 2193 void (*add) (struct ib_device *); 2194 void (*remove)(struct ib_device *, void *client_data); 2195 2196 /* Returns the net_dev belonging to this ib_client and matching the 2197 * given parameters. 2198 * @dev: An RDMA device that the net_dev use for communication. 2199 * @port: A physical port number on the RDMA device. 2200 * @pkey: P_Key that the net_dev uses if applicable. 2201 * @gid: A GID that the net_dev uses to communicate. 2202 * @addr: An IP address the net_dev is configured with. 2203 * @client_data: The device's client data set by ib_set_client_data(). 2204 * 2205 * An ib_client that implements a net_dev on top of RDMA devices 2206 * (such as IP over IB) should implement this callback, allowing the 2207 * rdma_cm module to find the right net_dev for a given request. 2208 * 2209 * The caller is responsible for calling dev_put on the returned 2210 * netdev. */ 2211 struct net_device *(*get_net_dev_by_params)( 2212 struct ib_device *dev, 2213 u8 port, 2214 u16 pkey, 2215 const union ib_gid *gid, 2216 const struct sockaddr *addr, 2217 void *client_data); 2218 struct list_head list; 2219 }; 2220 2221 struct ib_device *ib_alloc_device(size_t size); 2222 void ib_dealloc_device(struct ib_device *device); 2223 2224 void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); 2225 2226 int ib_register_device(struct ib_device *device, 2227 int (*port_callback)(struct ib_device *, 2228 u8, struct kobject *)); 2229 void ib_unregister_device(struct ib_device *device); 2230 2231 int ib_register_client (struct ib_client *client); 2232 void ib_unregister_client(struct ib_client *client); 2233 2234 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 2235 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2236 void *data); 2237 2238 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2239 { 2240 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2241 } 2242 2243 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2244 { 2245 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2246 } 2247 2248 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2249 size_t offset, 2250 size_t len) 2251 { 2252 const void __user *p = udata->inbuf + offset; 2253 bool ret; 2254 u8 *buf; 2255 2256 if (len > USHRT_MAX) 2257 return false; 2258 2259 buf = memdup_user(p, len); 2260 if (IS_ERR(buf)) 2261 return false; 2262 2263 ret = !memchr_inv(buf, 0, len); 2264 kfree(buf); 2265 return ret; 2266 } 2267 2268 /** 2269 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2270 * contains all required attributes and no attributes not allowed for 2271 * the given QP state transition. 2272 * @cur_state: Current QP state 2273 * @next_state: Next QP state 2274 * @type: QP type 2275 * @mask: Mask of supplied QP attributes 2276 * @ll : link layer of port 2277 * 2278 * This function is a helper function that a low-level driver's 2279 * modify_qp method can use to validate the consumer's input. 
It 2280 * checks that cur_state and next_state are valid QP states, that a 2281 * transition from cur_state to next_state is allowed by the IB spec, 2282 * and that the attribute mask supplied is allowed for the transition. 2283 */ 2284 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2285 enum ib_qp_type type, enum ib_qp_attr_mask mask, 2286 enum rdma_link_layer ll); 2287 2288 int ib_register_event_handler (struct ib_event_handler *event_handler); 2289 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 2290 void ib_dispatch_event(struct ib_event *event); 2291 2292 int ib_query_port(struct ib_device *device, 2293 u8 port_num, struct ib_port_attr *port_attr); 2294 2295 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2296 u8 port_num); 2297 2298 /** 2299 * rdma_cap_ib_switch - Check if the device is IB switch 2300 * @device: Device to check 2301 * 2302 * Device driver is responsible for setting is_switch bit on 2303 * in ib_device structure at init time. 2304 * 2305 * Return: true if the device is IB switch. 2306 */ 2307 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2308 { 2309 return device->is_switch; 2310 } 2311 2312 /** 2313 * rdma_start_port - Return the first valid port number for the device 2314 * specified 2315 * 2316 * @device: Device to be checked 2317 * 2318 * Return start port number 2319 */ 2320 static inline u8 rdma_start_port(const struct ib_device *device) 2321 { 2322 return rdma_cap_ib_switch(device) ? 0 : 1; 2323 } 2324 2325 /** 2326 * rdma_end_port - Return the last valid port number for the device 2327 * specified 2328 * 2329 * @device: Device to be checked 2330 * 2331 * Return last port number 2332 */ 2333 static inline u8 rdma_end_port(const struct ib_device *device) 2334 { 2335 return rdma_cap_ib_switch(device) ? 
0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
                                     unsigned int port)
{
        return (port >= rdma_start_port(device) &&
                port <= rdma_end_port(device));
}

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags &
                (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
        return rdma_protocol_ib(device, port_num) ||
                rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
}

/**
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices. A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions. These OPA MADs share many but not all of
 * the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 * 1) MADs are variable size up to 2K
 *    IBTA defined MADs remain fixed at 256 bytes
 * 2) OPA SMPs must carry valid PKeys
 * 3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
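 *
 * As an illustrative sketch only (the helper below and its name are
 * hypothetical, not part of this header), a MAD consumer would typically
 * size its buffers from rdma_max_mad_size() and consult this capability to
 * decide whether the jumbo 2K OPA format may appear on the port:
 *
 *      static void *alloc_mad_buf(struct ib_device *dev, u8 port, size_t *len)
 *      {
 *              *len = rdma_max_mad_size(dev, port);
 *              if (!*len)
 *                      return NULL;            // port does not support MADs
 *              return kzalloc(*len, GFP_KERNEL);
 *      }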
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
        return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
                == RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access. Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI). This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric. These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination. The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI). Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability iWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
2501 * 2502 * Return: true if the port should act as a client to the fabric Subnet 2503 * Administration interface. This does not imply that the SA service is 2504 * running locally. 2505 */ 2506 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 2507 { 2508 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; 2509 } 2510 2511 /** 2512 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 2513 * Multicast. 2514 * @device: Device to check 2515 * @port_num: Port number to check 2516 * 2517 * InfiniBand multicast registration is more complex than normal IPv4 or 2518 * IPv6 multicast registration. Each Host Channel Adapter must register 2519 * with the Subnet Manager when it wishes to join a multicast group. It 2520 * should do so only once regardless of how many queue pairs it subscribes 2521 * to this group. And it should leave the group only after all queue pairs 2522 * attached to the group have been detached. 2523 * 2524 * Return: true if the port must undertake the additional adminstrative 2525 * overhead of registering/unregistering with the SM and tracking of the 2526 * total number of queue pairs attached to the multicast group. 2527 */ 2528 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 2529 { 2530 return rdma_cap_ib_sa(device, port_num); 2531 } 2532 2533 /** 2534 * rdma_cap_af_ib - Check if the port of device has the capability 2535 * Native Infiniband Address. 2536 * @device: Device to check 2537 * @port_num: Port number to check 2538 * 2539 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 2540 * GID. RoCE uses a different mechanism, but still generates a GID via 2541 * a prescribed mechanism and port specific data. 2542 * 2543 * Return: true if the port uses a GID address to identify devices on the 2544 * network. 2545 */ 2546 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 2547 { 2548 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; 2549 } 2550 2551 /** 2552 * rdma_cap_eth_ah - Check if the port of device has the capability 2553 * Ethernet Address Handle. 2554 * @device: Device to check 2555 * @port_num: Port number to check 2556 * 2557 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 2558 * to fabricate GIDs over Ethernet/IP specific addresses native to the 2559 * port. Normally, packet headers are generated by the sending host 2560 * adapter, but when sending connectionless datagrams, we must manually 2561 * inject the proper headers for the fabric we are communicating over. 2562 * 2563 * Return: true if we are running as a RoCE port and must force the 2564 * addition of a Global Route Header built from our Ethernet Address 2565 * Handle into our header list for connectionless packets. 2566 */ 2567 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2568 { 2569 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2570 } 2571 2572 /** 2573 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2574 * 2575 * @device: Device 2576 * @port_num: Port number 2577 * 2578 * This MAD size includes the MAD headers and MAD payload. No other headers 2579 * are included. 2580 * 2581 * Return the max MAD size required by the Port. 
Will return 0 if the port 2582 * does not support MADs 2583 */ 2584 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 2585 { 2586 return device->port_immutable[port_num].max_mad_size; 2587 } 2588 2589 /** 2590 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 2591 * @device: Device to check 2592 * @port_num: Port number to check 2593 * 2594 * RoCE GID table mechanism manages the various GIDs for a device. 2595 * 2596 * NOTE: if allocating the port's GID table has failed, this call will still 2597 * return true, but any RoCE GID table API will fail. 2598 * 2599 * Return: true if the port uses RoCE GID table mechanism in order to manage 2600 * its GIDs. 2601 */ 2602 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 2603 u8 port_num) 2604 { 2605 return rdma_protocol_roce(device, port_num) && 2606 device->add_gid && device->del_gid; 2607 } 2608 2609 /* 2610 * Check if the device supports READ W/ INVALIDATE. 2611 */ 2612 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 2613 { 2614 /* 2615 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 2616 * has support for it yet. 2617 */ 2618 return rdma_protocol_iwarp(dev, port_num); 2619 } 2620 2621 int ib_query_gid(struct ib_device *device, 2622 u8 port_num, int index, union ib_gid *gid, 2623 struct ib_gid_attr *attr); 2624 2625 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2626 int state); 2627 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2628 struct ifla_vf_info *info); 2629 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2630 struct ifla_vf_stats *stats); 2631 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2632 int type); 2633 2634 int ib_query_pkey(struct ib_device *device, 2635 u8 port_num, u16 index, u16 *pkey); 2636 2637 int ib_modify_device(struct ib_device *device, 2638 int device_modify_mask, 2639 struct ib_device_modify *device_modify); 2640 2641 int ib_modify_port(struct ib_device *device, 2642 u8 port_num, int port_modify_mask, 2643 struct ib_port_modify *port_modify); 2644 2645 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2646 enum ib_gid_type gid_type, struct net_device *ndev, 2647 u8 *port_num, u16 *index); 2648 2649 int ib_find_pkey(struct ib_device *device, 2650 u8 port_num, u16 pkey, u16 *index); 2651 2652 enum ib_pd_flags { 2653 /* 2654 * Create a memory registration for all memory in the system and place 2655 * the rkey for it into pd->unsafe_global_rkey. This can be used by 2656 * ULPs to avoid the overhead of dynamic MRs. 2657 * 2658 * This flag is generally considered unsafe and must only be used in 2659 * extremly trusted environments. Every use of it will log a warning 2660 * in the kernel log. 2661 */ 2662 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 2663 }; 2664 2665 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 2666 const char *caller); 2667 #define ib_alloc_pd(device, flags) \ 2668 __ib_alloc_pd((device), (flags), __func__) 2669 void ib_dealloc_pd(struct ib_pd *pd); 2670 2671 /** 2672 * ib_create_ah - Creates an address handle for the given address vector. 2673 * @pd: The protection domain associated with the address handle. 2674 * @ah_attr: The attributes of the address vector. 2675 * 2676 * The address handle is used to reference a local or global destination 2677 * in all UD QP post sends. 
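 *
 * A minimal sketch (for illustration only; it assumes a UD-style responder
 * that already knows the peer's LID, and make_reply_ah() is a hypothetical
 * helper rather than part of this API):
 *
 *      static struct ib_ah *make_reply_ah(struct ib_pd *pd, u16 dlid, u8 port)
 *      {
 *              struct ib_ah_attr attr = {
 *                      .dlid     = dlid,
 *                      .sl       = 0,
 *                      .port_num = port,
 *              };
 *
 *              return ib_create_ah(pd, &attr); // ERR_PTR() on failure
 *      }
 *
 * The returned handle should eventually be released with ib_destroy_ah().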
2678 */ 2679 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 2680 2681 /** 2682 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header 2683 * work completion. 2684 * @hdr: the L3 header to parse 2685 * @net_type: type of header to parse 2686 * @sgid: place to store source gid 2687 * @dgid: place to store destination gid 2688 */ 2689 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 2690 enum rdma_network_type net_type, 2691 union ib_gid *sgid, union ib_gid *dgid); 2692 2693 /** 2694 * ib_get_rdma_header_version - Get the header version 2695 * @hdr: the L3 header to parse 2696 */ 2697 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); 2698 2699 /** 2700 * ib_init_ah_from_wc - Initializes address handle attributes from a 2701 * work completion. 2702 * @device: Device on which the received message arrived. 2703 * @port_num: Port on which the received message arrived. 2704 * @wc: Work completion associated with the received message. 2705 * @grh: References the received global route header. This parameter is 2706 * ignored unless the work completion indicates that the GRH is valid. 2707 * @ah_attr: Returned attributes that can be used when creating an address 2708 * handle for replying to the message. 2709 */ 2710 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 2711 const struct ib_wc *wc, const struct ib_grh *grh, 2712 struct ib_ah_attr *ah_attr); 2713 2714 /** 2715 * ib_create_ah_from_wc - Creates an address handle associated with the 2716 * sender of the specified work completion. 2717 * @pd: The protection domain associated with the address handle. 2718 * @wc: Work completion information associated with a received message. 2719 * @grh: References the received global route header. This parameter is 2720 * ignored unless the work completion indicates that the GRH is valid. 2721 * @port_num: The outbound port number to associate with the address. 2722 * 2723 * The address handle is used to reference a local or global destination 2724 * in all UD QP post sends. 2725 */ 2726 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 2727 const struct ib_grh *grh, u8 port_num); 2728 2729 /** 2730 * ib_modify_ah - Modifies the address vector associated with an address 2731 * handle. 2732 * @ah: The address handle to modify. 2733 * @ah_attr: The new address vector attributes to associate with the 2734 * address handle. 2735 */ 2736 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2737 2738 /** 2739 * ib_query_ah - Queries the address vector associated with an address 2740 * handle. 2741 * @ah: The address handle to query. 2742 * @ah_attr: The address vector attributes associated with the address 2743 * handle. 2744 */ 2745 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2746 2747 /** 2748 * ib_destroy_ah - Destroys an address handle. 2749 * @ah: The address handle to destroy. 2750 */ 2751 int ib_destroy_ah(struct ib_ah *ah); 2752 2753 /** 2754 * ib_create_srq - Creates a SRQ associated with the specified protection 2755 * domain. 2756 * @pd: The protection domain associated with the SRQ. 2757 * @srq_init_attr: A list of initial attributes required to create the 2758 * SRQ. If SRQ creation succeeds, then the attributes are updated to 2759 * the actual capabilities of the created SRQ. 2760 * 2761 * srq_attr->max_wr and srq_attr->max_sge are read the determine the 2762 * requested size of the SRQ, and set to the actual values allocated 2763 * on return. 
If ib_create_srq() succeeds, then max_wr and max_sge 2764 * will always be at least as large as the requested values. 2765 */ 2766 struct ib_srq *ib_create_srq(struct ib_pd *pd, 2767 struct ib_srq_init_attr *srq_init_attr); 2768 2769 /** 2770 * ib_modify_srq - Modifies the attributes for the specified SRQ. 2771 * @srq: The SRQ to modify. 2772 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 2773 * the current values of selected SRQ attributes are returned. 2774 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 2775 * are being modified. 2776 * 2777 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 2778 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 2779 * the number of receives queued drops below the limit. 2780 */ 2781 int ib_modify_srq(struct ib_srq *srq, 2782 struct ib_srq_attr *srq_attr, 2783 enum ib_srq_attr_mask srq_attr_mask); 2784 2785 /** 2786 * ib_query_srq - Returns the attribute list and current values for the 2787 * specified SRQ. 2788 * @srq: The SRQ to query. 2789 * @srq_attr: The attributes of the specified SRQ. 2790 */ 2791 int ib_query_srq(struct ib_srq *srq, 2792 struct ib_srq_attr *srq_attr); 2793 2794 /** 2795 * ib_destroy_srq - Destroys the specified SRQ. 2796 * @srq: The SRQ to destroy. 2797 */ 2798 int ib_destroy_srq(struct ib_srq *srq); 2799 2800 /** 2801 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 2802 * @srq: The SRQ to post the work request on. 2803 * @recv_wr: A list of work requests to post on the receive queue. 2804 * @bad_recv_wr: On an immediate failure, this parameter will reference 2805 * the work request that failed to be posted on the QP. 2806 */ 2807 static inline int ib_post_srq_recv(struct ib_srq *srq, 2808 struct ib_recv_wr *recv_wr, 2809 struct ib_recv_wr **bad_recv_wr) 2810 { 2811 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 2812 } 2813 2814 /** 2815 * ib_create_qp - Creates a QP associated with the specified protection 2816 * domain. 2817 * @pd: The protection domain associated with the QP. 2818 * @qp_init_attr: A list of initial attributes required to create the 2819 * QP. If QP creation succeeds, then the attributes are updated to 2820 * the actual capabilities of the created QP. 2821 */ 2822 struct ib_qp *ib_create_qp(struct ib_pd *pd, 2823 struct ib_qp_init_attr *qp_init_attr); 2824 2825 /** 2826 * ib_modify_qp - Modifies the attributes for the specified QP and then 2827 * transitions the QP to the given state. 2828 * @qp: The QP to modify. 2829 * @qp_attr: On input, specifies the QP attributes to modify. On output, 2830 * the current values of selected QP attributes are returned. 2831 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 2832 * are being modified. 2833 */ 2834 int ib_modify_qp(struct ib_qp *qp, 2835 struct ib_qp_attr *qp_attr, 2836 int qp_attr_mask); 2837 2838 /** 2839 * ib_query_qp - Returns the attribute list and current values for the 2840 * specified QP. 2841 * @qp: The QP to query. 2842 * @qp_attr: The attributes of the specified QP. 2843 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 2844 * @qp_init_attr: Additional attributes of the selected QP. 2845 * 2846 * The qp_attr_mask may be used to limit the query to gathering only the 2847 * selected attributes. 
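 *
 * For example, a driver-agnostic way to read just the current state might
 * look like the following sketch (the helper name is illustrative only):
 *
 *      static int qp_current_state(struct ib_qp *qp, enum ib_qp_state *state)
 *      {
 *              struct ib_qp_attr attr;
 *              struct ib_qp_init_attr init_attr;
 *              int ret;
 *
 *              ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
 *              if (!ret)
 *                      *state = attr.qp_state;
 *              return ret;
 *      }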
2848 */ 2849 int ib_query_qp(struct ib_qp *qp, 2850 struct ib_qp_attr *qp_attr, 2851 int qp_attr_mask, 2852 struct ib_qp_init_attr *qp_init_attr); 2853 2854 /** 2855 * ib_destroy_qp - Destroys the specified QP. 2856 * @qp: The QP to destroy. 2857 */ 2858 int ib_destroy_qp(struct ib_qp *qp); 2859 2860 /** 2861 * ib_open_qp - Obtain a reference to an existing sharable QP. 2862 * @xrcd - XRC domain 2863 * @qp_open_attr: Attributes identifying the QP to open. 2864 * 2865 * Returns a reference to a sharable QP. 2866 */ 2867 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 2868 struct ib_qp_open_attr *qp_open_attr); 2869 2870 /** 2871 * ib_close_qp - Release an external reference to a QP. 2872 * @qp: The QP handle to release 2873 * 2874 * The opened QP handle is released by the caller. The underlying 2875 * shared QP is not destroyed until all internal references are released. 2876 */ 2877 int ib_close_qp(struct ib_qp *qp); 2878 2879 /** 2880 * ib_post_send - Posts a list of work requests to the send queue of 2881 * the specified QP. 2882 * @qp: The QP to post the work request on. 2883 * @send_wr: A list of work requests to post on the send queue. 2884 * @bad_send_wr: On an immediate failure, this parameter will reference 2885 * the work request that failed to be posted on the QP. 2886 * 2887 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 2888 * error is returned, the QP state shall not be affected, 2889 * ib_post_send() will return an immediate error after queueing any 2890 * earlier work requests in the list. 2891 */ 2892 static inline int ib_post_send(struct ib_qp *qp, 2893 struct ib_send_wr *send_wr, 2894 struct ib_send_wr **bad_send_wr) 2895 { 2896 return qp->device->post_send(qp, send_wr, bad_send_wr); 2897 } 2898 2899 /** 2900 * ib_post_recv - Posts a list of work requests to the receive queue of 2901 * the specified QP. 2902 * @qp: The QP to post the work request on. 2903 * @recv_wr: A list of work requests to post on the receive queue. 2904 * @bad_recv_wr: On an immediate failure, this parameter will reference 2905 * the work request that failed to be posted on the QP. 2906 */ 2907 static inline int ib_post_recv(struct ib_qp *qp, 2908 struct ib_recv_wr *recv_wr, 2909 struct ib_recv_wr **bad_recv_wr) 2910 { 2911 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 2912 } 2913 2914 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 2915 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); 2916 void ib_free_cq(struct ib_cq *cq); 2917 int ib_process_cq_direct(struct ib_cq *cq, int budget); 2918 2919 /** 2920 * ib_create_cq - Creates a CQ on the specified device. 2921 * @device: The device on which to create the CQ. 2922 * @comp_handler: A user-specified callback that is invoked when a 2923 * completion event occurs on the CQ. 2924 * @event_handler: A user-specified callback that is invoked when an 2925 * asynchronous event not associated with a completion occurs on the CQ. 2926 * @cq_context: Context associated with the CQ returned to the user via 2927 * the associated completion and event handlers. 2928 * @cq_attr: The attributes the CQ should be created upon. 2929 * 2930 * Users can examine the cq structure to determine the actual CQ size. 2931 */ 2932 struct ib_cq *ib_create_cq(struct ib_device *device, 2933 ib_comp_handler comp_handler, 2934 void (*event_handler)(struct ib_event *, void *), 2935 void *cq_context, 2936 const struct ib_cq_init_attr *cq_attr); 2937 2938 /** 2939 * ib_resize_cq - Modifies the capacity of the CQ. 
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 * on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 * at least the specified number of unreaped completions on the CQ.
3028 * @cq: The CQ to generate an event for. 3029 * @wc_cnt: The number of unreaped completions that should be on the 3030 * CQ before an event is generated. 3031 */ 3032 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3033 { 3034 return cq->device->req_ncomp_notif ? 3035 cq->device->req_ncomp_notif(cq, wc_cnt) : 3036 -ENOSYS; 3037 } 3038 3039 /** 3040 * ib_dma_mapping_error - check a DMA addr for error 3041 * @dev: The device for which the dma_addr was created 3042 * @dma_addr: The DMA address to check 3043 */ 3044 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3045 { 3046 if (dev->dma_ops) 3047 return dev->dma_ops->mapping_error(dev, dma_addr); 3048 return dma_mapping_error(dev->dma_device, dma_addr); 3049 } 3050 3051 /** 3052 * ib_dma_map_single - Map a kernel virtual address to DMA address 3053 * @dev: The device for which the dma_addr is to be created 3054 * @cpu_addr: The kernel virtual address 3055 * @size: The size of the region in bytes 3056 * @direction: The direction of the DMA 3057 */ 3058 static inline u64 ib_dma_map_single(struct ib_device *dev, 3059 void *cpu_addr, size_t size, 3060 enum dma_data_direction direction) 3061 { 3062 if (dev->dma_ops) 3063 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 3064 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3065 } 3066 3067 /** 3068 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 3069 * @dev: The device for which the DMA address was created 3070 * @addr: The DMA address 3071 * @size: The size of the region in bytes 3072 * @direction: The direction of the DMA 3073 */ 3074 static inline void ib_dma_unmap_single(struct ib_device *dev, 3075 u64 addr, size_t size, 3076 enum dma_data_direction direction) 3077 { 3078 if (dev->dma_ops) 3079 dev->dma_ops->unmap_single(dev, addr, size, direction); 3080 else 3081 dma_unmap_single(dev->dma_device, addr, size, direction); 3082 } 3083 3084 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 3085 void *cpu_addr, size_t size, 3086 enum dma_data_direction direction, 3087 unsigned long dma_attrs) 3088 { 3089 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 3090 direction, dma_attrs); 3091 } 3092 3093 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 3094 u64 addr, size_t size, 3095 enum dma_data_direction direction, 3096 unsigned long dma_attrs) 3097 { 3098 return dma_unmap_single_attrs(dev->dma_device, addr, size, 3099 direction, dma_attrs); 3100 } 3101 3102 /** 3103 * ib_dma_map_page - Map a physical page to DMA address 3104 * @dev: The device for which the dma_addr is to be created 3105 * @page: The page to be mapped 3106 * @offset: The offset within the page 3107 * @size: The size of the region in bytes 3108 * @direction: The direction of the DMA 3109 */ 3110 static inline u64 ib_dma_map_page(struct ib_device *dev, 3111 struct page *page, 3112 unsigned long offset, 3113 size_t size, 3114 enum dma_data_direction direction) 3115 { 3116 if (dev->dma_ops) 3117 return dev->dma_ops->map_page(dev, page, offset, size, direction); 3118 return dma_map_page(dev->dma_device, page, offset, size, direction); 3119 } 3120 3121 /** 3122 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 3123 * @dev: The device for which the DMA address was created 3124 * @addr: The DMA address 3125 * @size: The size of the region in bytes 3126 * @direction: The direction of the DMA 3127 */ 3128 static inline void ib_dma_unmap_page(struct ib_device *dev, 3129 u64 
addr, size_t size, 3130 enum dma_data_direction direction) 3131 { 3132 if (dev->dma_ops) 3133 dev->dma_ops->unmap_page(dev, addr, size, direction); 3134 else 3135 dma_unmap_page(dev->dma_device, addr, size, direction); 3136 } 3137 3138 /** 3139 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 3140 * @dev: The device for which the DMA addresses are to be created 3141 * @sg: The array of scatter/gather entries 3142 * @nents: The number of scatter/gather entries 3143 * @direction: The direction of the DMA 3144 */ 3145 static inline int ib_dma_map_sg(struct ib_device *dev, 3146 struct scatterlist *sg, int nents, 3147 enum dma_data_direction direction) 3148 { 3149 if (dev->dma_ops) 3150 return dev->dma_ops->map_sg(dev, sg, nents, direction); 3151 return dma_map_sg(dev->dma_device, sg, nents, direction); 3152 } 3153 3154 /** 3155 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 3156 * @dev: The device for which the DMA addresses were created 3157 * @sg: The array of scatter/gather entries 3158 * @nents: The number of scatter/gather entries 3159 * @direction: The direction of the DMA 3160 */ 3161 static inline void ib_dma_unmap_sg(struct ib_device *dev, 3162 struct scatterlist *sg, int nents, 3163 enum dma_data_direction direction) 3164 { 3165 if (dev->dma_ops) 3166 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 3167 else 3168 dma_unmap_sg(dev->dma_device, sg, nents, direction); 3169 } 3170 3171 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3172 struct scatterlist *sg, int nents, 3173 enum dma_data_direction direction, 3174 unsigned long dma_attrs) 3175 { 3176 if (dev->dma_ops) 3177 return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, 3178 dma_attrs); 3179 else 3180 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3181 dma_attrs); 3182 } 3183 3184 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3185 struct scatterlist *sg, int nents, 3186 enum dma_data_direction direction, 3187 unsigned long dma_attrs) 3188 { 3189 if (dev->dma_ops) 3190 return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, 3191 dma_attrs); 3192 else 3193 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, 3194 dma_attrs); 3195 } 3196 /** 3197 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3198 * @dev: The device for which the DMA addresses were created 3199 * @sg: The scatter/gather entry 3200 * 3201 * Note: this function is obsolete. To do: change all occurrences of 3202 * ib_sg_dma_address() into sg_dma_address(). 3203 */ 3204 static inline u64 ib_sg_dma_address(struct ib_device *dev, 3205 struct scatterlist *sg) 3206 { 3207 return sg_dma_address(sg); 3208 } 3209 3210 /** 3211 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 3212 * @dev: The device for which the DMA addresses were created 3213 * @sg: The scatter/gather entry 3214 * 3215 * Note: this function is obsolete. To do: change all occurrences of 3216 * ib_sg_dma_len() into sg_dma_len(). 
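 *
 * For context, a typical consumer maps a scatterlist with ib_dma_map_sg()
 * and then walks the mapped entries; the sketch below is illustrative only
 * and post_one() is a hypothetical callback, not part of this header:
 *
 *      static int map_and_post(struct ib_device *dev, struct scatterlist *sgl,
 *                              int nents)
 *      {
 *              struct scatterlist *s;
 *              int n, i;
 *
 *              n = ib_dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *              if (!n)
 *                      return -ENOMEM;
 *              for_each_sg(sgl, s, n, i)
 *                      post_one(ib_sg_dma_address(dev, s),
 *                               ib_sg_dma_len(dev, s));
 *              ib_dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *              return 0;
 *      }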
3217 */ 3218 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 3219 struct scatterlist *sg) 3220 { 3221 return sg_dma_len(sg); 3222 } 3223 3224 /** 3225 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 3226 * @dev: The device for which the DMA address was created 3227 * @addr: The DMA address 3228 * @size: The size of the region in bytes 3229 * @dir: The direction of the DMA 3230 */ 3231 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 3232 u64 addr, 3233 size_t size, 3234 enum dma_data_direction dir) 3235 { 3236 if (dev->dma_ops) 3237 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 3238 else 3239 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3240 } 3241 3242 /** 3243 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 3244 * @dev: The device for which the DMA address was created 3245 * @addr: The DMA address 3246 * @size: The size of the region in bytes 3247 * @dir: The direction of the DMA 3248 */ 3249 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 3250 u64 addr, 3251 size_t size, 3252 enum dma_data_direction dir) 3253 { 3254 if (dev->dma_ops) 3255 dev->dma_ops->sync_single_for_device(dev, addr, size, dir); 3256 else 3257 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3258 } 3259 3260 /** 3261 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 3262 * @dev: The device for which the DMA address is requested 3263 * @size: The size of the region to allocate in bytes 3264 * @dma_handle: A pointer for returning the DMA address of the region 3265 * @flag: memory allocator flags 3266 */ 3267 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3268 size_t size, 3269 u64 *dma_handle, 3270 gfp_t flag) 3271 { 3272 if (dev->dma_ops) 3273 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); 3274 else { 3275 dma_addr_t handle; 3276 void *ret; 3277 3278 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); 3279 *dma_handle = handle; 3280 return ret; 3281 } 3282 } 3283 3284 /** 3285 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 3286 * @dev: The device for which the DMA addresses were allocated 3287 * @size: The size of the region 3288 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 3289 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 3290 */ 3291 static inline void ib_dma_free_coherent(struct ib_device *dev, 3292 size_t size, void *cpu_addr, 3293 u64 dma_handle) 3294 { 3295 if (dev->dma_ops) 3296 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 3297 else 3298 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 3299 } 3300 3301 /** 3302 * ib_dereg_mr - Deregisters a memory region and removes it from the 3303 * HCA translation table. 3304 * @mr: The memory region to deregister. 3305 * 3306 * This function can fail, if the memory region has memory windows bound to it. 3307 */ 3308 int ib_dereg_mr(struct ib_mr *mr); 3309 3310 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 3311 enum ib_mr_type mr_type, 3312 u32 max_num_sg); 3313 3314 /** 3315 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 3316 * R_Key and L_Key. 3317 * @mr - struct ib_mr pointer to be updated. 3318 * @newkey - new key to be used. 
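 *
 * A common pattern before reusing an MR with an IB_WR_REG_MR work request is
 * to advance the key so that stale remote references cannot match the new
 * registration. A minimal sketch (the helper name is illustrative only):
 *
 *      static void refresh_mr_key(struct ib_mr *mr)
 *      {
 *              u8 key = mr->rkey & 0x000000ff;
 *
 *              ib_update_fast_reg_key(mr, key + 1);    // wraps within the low byte
 *      }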
3319 */ 3320 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 3321 { 3322 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 3323 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 3324 } 3325 3326 /** 3327 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 3328 * for calculating a new rkey for type 2 memory windows. 3329 * @rkey - the rkey to increment. 3330 */ 3331 static inline u32 ib_inc_rkey(u32 rkey) 3332 { 3333 const u32 mask = 0x000000ff; 3334 return ((rkey + 1) & mask) | (rkey & ~mask); 3335 } 3336 3337 /** 3338 * ib_alloc_fmr - Allocates a unmapped fast memory region. 3339 * @pd: The protection domain associated with the unmapped region. 3340 * @mr_access_flags: Specifies the memory access rights. 3341 * @fmr_attr: Attributes of the unmapped region. 3342 * 3343 * A fast memory region must be mapped before it can be used as part of 3344 * a work request. 3345 */ 3346 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 3347 int mr_access_flags, 3348 struct ib_fmr_attr *fmr_attr); 3349 3350 /** 3351 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 3352 * @fmr: The fast memory region to associate with the pages. 3353 * @page_list: An array of physical pages to map to the fast memory region. 3354 * @list_len: The number of pages in page_list. 3355 * @iova: The I/O virtual address to use with the mapped region. 3356 */ 3357 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 3358 u64 *page_list, int list_len, 3359 u64 iova) 3360 { 3361 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3362 } 3363 3364 /** 3365 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 3366 * @fmr_list: A linked list of fast memory regions to unmap. 3367 */ 3368 int ib_unmap_fmr(struct list_head *fmr_list); 3369 3370 /** 3371 * ib_dealloc_fmr - Deallocates a fast memory region. 3372 * @fmr: The fast memory region to deallocate. 3373 */ 3374 int ib_dealloc_fmr(struct ib_fmr *fmr); 3375 3376 /** 3377 * ib_attach_mcast - Attaches the specified QP to a multicast group. 3378 * @qp: QP to attach to the multicast group. The QP must be type 3379 * IB_QPT_UD. 3380 * @gid: Multicast group GID. 3381 * @lid: Multicast group LID in host byte order. 3382 * 3383 * In order to send and receive multicast packets, subnet 3384 * administration must have created the multicast group and configured 3385 * the fabric appropriately. The port associated with the specified 3386 * QP must also be a member of the multicast group. 3387 */ 3388 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3389 3390 /** 3391 * ib_detach_mcast - Detaches the specified QP from a multicast group. 3392 * @qp: QP to detach from the multicast group. 3393 * @gid: Multicast group GID. 3394 * @lid: Multicast group LID in host byte order. 3395 */ 3396 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3397 3398 /** 3399 * ib_alloc_xrcd - Allocates an XRC domain. 3400 * @device: The device on which to allocate the XRC domain. 3401 */ 3402 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); 3403 3404 /** 3405 * ib_dealloc_xrcd - Deallocates an XRC domain. 3406 * @xrcd: The XRC domain to deallocate. 
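 *
 * The expected pairing with ib_alloc_xrcd() is sketched below for
 * illustration only; deallocation is assumed to fail while QPs or SRQs
 * still reference the domain:
 *
 *      struct ib_xrcd *xrcd;
 *
 *      xrcd = ib_alloc_xrcd(device);
 *      if (IS_ERR(xrcd))
 *              return PTR_ERR(xrcd);
 *      // ... create XRC SRQs/QPs that reference the domain ...
 *      ib_dealloc_xrcd(xrcd);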
3407 */ 3408 int ib_dealloc_xrcd(struct ib_xrcd *xrcd); 3409 3410 struct ib_flow *ib_create_flow(struct ib_qp *qp, 3411 struct ib_flow_attr *flow_attr, int domain); 3412 int ib_destroy_flow(struct ib_flow *flow_id); 3413 3414 static inline int ib_check_mr_access(int flags) 3415 { 3416 /* 3417 * Local write permission is required if remote write or 3418 * remote atomic permission is also requested. 3419 */ 3420 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 3421 !(flags & IB_ACCESS_LOCAL_WRITE)) 3422 return -EINVAL; 3423 3424 return 0; 3425 } 3426 3427 /** 3428 * ib_check_mr_status: lightweight check of MR status. 3429 * This routine may provide status checks on a selected 3430 * ib_mr. first use is for signature status check. 3431 * 3432 * @mr: A memory region. 3433 * @check_mask: Bitmask of which checks to perform from 3434 * ib_mr_status_check enumeration. 3435 * @mr_status: The container of relevant status checks. 3436 * failed checks will be indicated in the status bitmask 3437 * and the relevant info shall be in the error item. 3438 */ 3439 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3440 struct ib_mr_status *mr_status); 3441 3442 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3443 u16 pkey, const union ib_gid *gid, 3444 const struct sockaddr *addr); 3445 struct ib_wq *ib_create_wq(struct ib_pd *pd, 3446 struct ib_wq_init_attr *init_attr); 3447 int ib_destroy_wq(struct ib_wq *wq); 3448 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 3449 u32 wq_attr_mask); 3450 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 3451 struct ib_rwq_ind_table_init_attr* 3452 wq_ind_table_init_attr); 3453 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 3454 3455 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3456 unsigned int *sg_offset, unsigned int page_size); 3457 3458 static inline int 3459 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3460 unsigned int *sg_offset, unsigned int page_size) 3461 { 3462 int n; 3463 3464 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 3465 mr->iova = 0; 3466 3467 return n; 3468 } 3469 3470 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 3471 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 3472 3473 void ib_drain_rq(struct ib_qp *qp); 3474 void ib_drain_sq(struct ib_qp *qp); 3475 void ib_drain_qp(struct ib_qp *qp); 3476 3477 int ib_resolve_eth_dmac(struct ib_device *device, 3478 struct ib_ah_attr *ah_attr); 3479 #endif /* IB_VERBS_H */ 3480
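/*
 * Usage note (illustrative sketch, not part of this header): completion
 * handling traditionally pairs ib_poll_cq() with ib_req_notify_cq() using
 * IB_CQ_REPORT_MISSED_EVENTS, so that an entry added between the final poll
 * and the notification request is not lost. handle_wc() below is a
 * hypothetical consumer callback.
 *
 *      static void drain_and_rearm(struct ib_cq *cq)
 *      {
 *              struct ib_wc wc;
 *
 *              do {
 *                      while (ib_poll_cq(cq, 1, &wc) > 0)
 *                              handle_wc(&wc);
 *              } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *                                        IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *      }
 */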