1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #if !defined(IB_VERBS_H) 40 #define IB_VERBS_H 41 42 #include <linux/types.h> 43 #include <linux/device.h> 44 #include <linux/mm.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/kref.h> 47 #include <linux/list.h> 48 #include <linux/rwsem.h> 49 #include <linux/scatterlist.h> 50 #include <linux/workqueue.h> 51 #include <linux/socket.h> 52 #include <linux/irq_poll.h> 53 #include <uapi/linux/if_ether.h> 54 #include <net/ipv6.h> 55 #include <net/ip.h> 56 #include <linux/string.h> 57 #include <linux/slab.h> 58 59 #include <linux/if_link.h> 60 #include <linux/atomic.h> 61 #include <linux/mmu_notifier.h> 62 #include <asm/uaccess.h> 63 64 extern struct workqueue_struct *ib_wq; 65 extern struct workqueue_struct *ib_comp_wq; 66 67 union ib_gid { 68 u8 raw[16]; 69 struct { 70 __be64 subnet_prefix; 71 __be64 interface_id; 72 } global; 73 }; 74 75 extern union ib_gid zgid; 76 77 enum ib_gid_type { 78 /* If link layer is Ethernet, this is RoCE V1 */ 79 IB_GID_TYPE_IB = 0, 80 IB_GID_TYPE_ROCE = 0, 81 IB_GID_TYPE_ROCE_UDP_ENCAP = 1, 82 IB_GID_TYPE_SIZE 83 }; 84 85 #define ROCE_V2_UDP_DPORT 4791 86 struct ib_gid_attr { 87 enum ib_gid_type gid_type; 88 struct net_device *ndev; 89 }; 90 91 enum rdma_node_type { 92 /* IB values map to NodeInfo:NodeType. 
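	 * (Per the IBTA spec, NodeType 1 is a channel adapter, 2 a switch and
	 * 3 a router; the RNIC and usNIC entries below are Linux-specific
	 * additions that do not come from NodeInfo.)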
*/ 93 RDMA_NODE_IB_CA = 1, 94 RDMA_NODE_IB_SWITCH, 95 RDMA_NODE_IB_ROUTER, 96 RDMA_NODE_RNIC, 97 RDMA_NODE_USNIC, 98 RDMA_NODE_USNIC_UDP, 99 }; 100 101 enum { 102 /* set the local administered indication */ 103 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, 104 }; 105 106 enum rdma_transport_type { 107 RDMA_TRANSPORT_IB, 108 RDMA_TRANSPORT_IWARP, 109 RDMA_TRANSPORT_USNIC, 110 RDMA_TRANSPORT_USNIC_UDP 111 }; 112 113 enum rdma_protocol_type { 114 RDMA_PROTOCOL_IB, 115 RDMA_PROTOCOL_IBOE, 116 RDMA_PROTOCOL_IWARP, 117 RDMA_PROTOCOL_USNIC_UDP 118 }; 119 120 __attribute_const__ enum rdma_transport_type 121 rdma_node_get_transport(enum rdma_node_type node_type); 122 123 enum rdma_network_type { 124 RDMA_NETWORK_IB, 125 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, 126 RDMA_NETWORK_IPV4, 127 RDMA_NETWORK_IPV6 128 }; 129 130 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type) 131 { 132 if (network_type == RDMA_NETWORK_IPV4 || 133 network_type == RDMA_NETWORK_IPV6) 134 return IB_GID_TYPE_ROCE_UDP_ENCAP; 135 136 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ 137 return IB_GID_TYPE_IB; 138 } 139 140 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type, 141 union ib_gid *gid) 142 { 143 if (gid_type == IB_GID_TYPE_IB) 144 return RDMA_NETWORK_IB; 145 146 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) 147 return RDMA_NETWORK_IPV4; 148 else 149 return RDMA_NETWORK_IPV6; 150 } 151 152 enum rdma_link_layer { 153 IB_LINK_LAYER_UNSPECIFIED, 154 IB_LINK_LAYER_INFINIBAND, 155 IB_LINK_LAYER_ETHERNET, 156 }; 157 158 enum ib_device_cap_flags { 159 IB_DEVICE_RESIZE_MAX_WR = (1 << 0), 160 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1), 161 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2), 162 IB_DEVICE_RAW_MULTI = (1 << 3), 163 IB_DEVICE_AUTO_PATH_MIG = (1 << 4), 164 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5), 165 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), 166 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), 167 IB_DEVICE_SHUTDOWN_PORT = (1 << 8), 168 IB_DEVICE_INIT_TYPE = (1 << 9), 169 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), 170 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), 171 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), 172 IB_DEVICE_SRQ_RESIZE = (1 << 13), 173 IB_DEVICE_N_NOTIFY_CQ = (1 << 14), 174 175 /* 176 * This device supports a per-device lkey or stag that can be 177 * used without performing a memory registration for the local 178 * memory. Note that ULPs should never check this flag, but 179 * instead of use the local_dma_lkey flag in the ib_pd structure, 180 * which will always contain a usable lkey. 181 */ 182 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), 183 IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16), 184 IB_DEVICE_MEM_WINDOW = (1 << 17), 185 /* 186 * Devices should set IB_DEVICE_UD_IP_SUM if they support 187 * insertion of UDP and TCP checksum on outgoing UD IPoIB 188 * messages and can verify the validity of checksum for 189 * incoming messages. Setting this flag implies that the 190 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 191 */ 192 IB_DEVICE_UD_IP_CSUM = (1 << 18), 193 IB_DEVICE_UD_TSO = (1 << 19), 194 IB_DEVICE_XRC = (1 << 20), 195 196 /* 197 * This device supports the IB "base memory management extension", 198 * which includes support for fast registrations (IB_WR_REG_MR, 199 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should 200 * also be set by any iWarp device which must support FRs to comply 201 * to the iWarp verbs spec. iWarp devices also support the 202 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the 203 * stag. 
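	 *
	 * A rough usage sketch (illustrative only, not part of this header's
	 * contract): a ULP that sees this flag can allocate an MR with
	 * ib_alloc_mr(), map a scatterlist into it with ib_map_mr_sg(), and
	 * then register it by posting an IB_WR_REG_MR work request built from
	 * struct ib_reg_wr, e.g.
	 *
	 *	struct ib_reg_wr reg = { };
	 *
	 *	reg.wr.opcode = IB_WR_REG_MR;
	 *	reg.mr = mr;
	 *	reg.key = mr->rkey;
	 *	reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
	 *	ret = ib_post_send(qp, &reg.wr, &bad_wr);
	 *
	 * where mr, qp, bad_wr and ret are the caller's own objects.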
204 */ 205 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), 206 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), 207 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), 208 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), 209 IB_DEVICE_RC_IP_CSUM = (1 << 25), 210 IB_DEVICE_RAW_IP_CSUM = (1 << 26), 211 /* 212 * Devices should set IB_DEVICE_CROSS_CHANNEL if they 213 * support execution of WQEs that involve synchronization 214 * of I/O operations with single completion queue managed 215 * by hardware. 216 */ 217 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 218 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 219 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 220 IB_DEVICE_ON_DEMAND_PAGING = (1 << 31), 221 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 222 IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33), 223 }; 224 225 enum ib_signature_prot_cap { 226 IB_PROT_T10DIF_TYPE_1 = 1, 227 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 228 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 229 }; 230 231 enum ib_signature_guard_cap { 232 IB_GUARD_T10DIF_CRC = 1, 233 IB_GUARD_T10DIF_CSUM = 1 << 1, 234 }; 235 236 enum ib_atomic_cap { 237 IB_ATOMIC_NONE, 238 IB_ATOMIC_HCA, 239 IB_ATOMIC_GLOB 240 }; 241 242 enum ib_odp_general_cap_bits { 243 IB_ODP_SUPPORT = 1 << 0, 244 }; 245 246 enum ib_odp_transport_cap_bits { 247 IB_ODP_SUPPORT_SEND = 1 << 0, 248 IB_ODP_SUPPORT_RECV = 1 << 1, 249 IB_ODP_SUPPORT_WRITE = 1 << 2, 250 IB_ODP_SUPPORT_READ = 1 << 3, 251 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 252 }; 253 254 struct ib_odp_caps { 255 uint64_t general_caps; 256 struct { 257 uint32_t rc_odp_caps; 258 uint32_t uc_odp_caps; 259 uint32_t ud_odp_caps; 260 } per_transport_caps; 261 }; 262 263 enum ib_cq_creation_flags { 264 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 265 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, 266 }; 267 268 struct ib_cq_init_attr { 269 unsigned int cqe; 270 int comp_vector; 271 u32 flags; 272 }; 273 274 struct ib_device_attr { 275 u64 fw_ver; 276 __be64 sys_image_guid; 277 u64 max_mr_size; 278 u64 page_size_cap; 279 u32 vendor_id; 280 u32 vendor_part_id; 281 u32 hw_ver; 282 int max_qp; 283 int max_qp_wr; 284 u64 device_cap_flags; 285 int max_sge; 286 int max_sge_rd; 287 int max_cq; 288 int max_cqe; 289 int max_mr; 290 int max_pd; 291 int max_qp_rd_atom; 292 int max_ee_rd_atom; 293 int max_res_rd_atom; 294 int max_qp_init_rd_atom; 295 int max_ee_init_rd_atom; 296 enum ib_atomic_cap atomic_cap; 297 enum ib_atomic_cap masked_atomic_cap; 298 int max_ee; 299 int max_rdd; 300 int max_mw; 301 int max_raw_ipv6_qp; 302 int max_raw_ethy_qp; 303 int max_mcast_grp; 304 int max_mcast_qp_attach; 305 int max_total_mcast_qp_attach; 306 int max_ah; 307 int max_fmr; 308 int max_map_per_fmr; 309 int max_srq; 310 int max_srq_wr; 311 int max_srq_sge; 312 unsigned int max_fast_reg_page_list_len; 313 u16 max_pkeys; 314 u8 local_ca_ack_delay; 315 int sig_prot_cap; 316 int sig_guard_cap; 317 struct ib_odp_caps odp_caps; 318 uint64_t timestamp_mask; 319 uint64_t hca_core_clock; /* in KHZ */ 320 }; 321 322 enum ib_mtu { 323 IB_MTU_256 = 1, 324 IB_MTU_512 = 2, 325 IB_MTU_1024 = 3, 326 IB_MTU_2048 = 4, 327 IB_MTU_4096 = 5 328 }; 329 330 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 331 { 332 switch (mtu) { 333 case IB_MTU_256: return 256; 334 case IB_MTU_512: return 512; 335 case IB_MTU_1024: return 1024; 336 case IB_MTU_2048: return 2048; 337 case IB_MTU_4096: return 4096; 338 default: return -1; 339 } 340 } 341 342 enum ib_port_state { 343 IB_PORT_NOP = 0, 344 IB_PORT_DOWN = 1, 345 IB_PORT_INIT = 2, 346 IB_PORT_ARMED = 3, 347 IB_PORT_ACTIVE = 4, 348 IB_PORT_ACTIVE_DEFER = 5 349 }; 350 351 enum ib_port_cap_flags { 
352 IB_PORT_SM = 1 << 1, 353 IB_PORT_NOTICE_SUP = 1 << 2, 354 IB_PORT_TRAP_SUP = 1 << 3, 355 IB_PORT_OPT_IPD_SUP = 1 << 4, 356 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 357 IB_PORT_SL_MAP_SUP = 1 << 6, 358 IB_PORT_MKEY_NVRAM = 1 << 7, 359 IB_PORT_PKEY_NVRAM = 1 << 8, 360 IB_PORT_LED_INFO_SUP = 1 << 9, 361 IB_PORT_SM_DISABLED = 1 << 10, 362 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, 363 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, 364 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, 365 IB_PORT_CM_SUP = 1 << 16, 366 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, 367 IB_PORT_REINIT_SUP = 1 << 18, 368 IB_PORT_DEVICE_MGMT_SUP = 1 << 19, 369 IB_PORT_VENDOR_CLASS_SUP = 1 << 20, 370 IB_PORT_DR_NOTICE_SUP = 1 << 21, 371 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 372 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 373 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 374 IB_PORT_CLIENT_REG_SUP = 1 << 25, 375 IB_PORT_IP_BASED_GIDS = 1 << 26, 376 }; 377 378 enum ib_port_width { 379 IB_WIDTH_1X = 1, 380 IB_WIDTH_4X = 2, 381 IB_WIDTH_8X = 4, 382 IB_WIDTH_12X = 8 383 }; 384 385 static inline int ib_width_enum_to_int(enum ib_port_width width) 386 { 387 switch (width) { 388 case IB_WIDTH_1X: return 1; 389 case IB_WIDTH_4X: return 4; 390 case IB_WIDTH_8X: return 8; 391 case IB_WIDTH_12X: return 12; 392 default: return -1; 393 } 394 } 395 396 enum ib_port_speed { 397 IB_SPEED_SDR = 1, 398 IB_SPEED_DDR = 2, 399 IB_SPEED_QDR = 4, 400 IB_SPEED_FDR10 = 8, 401 IB_SPEED_FDR = 16, 402 IB_SPEED_EDR = 32 403 }; 404 405 struct ib_protocol_stats { 406 /* TBD... */ 407 }; 408 409 struct iw_protocol_stats { 410 u64 ipInReceives; 411 u64 ipInHdrErrors; 412 u64 ipInTooBigErrors; 413 u64 ipInNoRoutes; 414 u64 ipInAddrErrors; 415 u64 ipInUnknownProtos; 416 u64 ipInTruncatedPkts; 417 u64 ipInDiscards; 418 u64 ipInDelivers; 419 u64 ipOutForwDatagrams; 420 u64 ipOutRequests; 421 u64 ipOutDiscards; 422 u64 ipOutNoRoutes; 423 u64 ipReasmTimeout; 424 u64 ipReasmReqds; 425 u64 ipReasmOKs; 426 u64 ipReasmFails; 427 u64 ipFragOKs; 428 u64 ipFragFails; 429 u64 ipFragCreates; 430 u64 ipInMcastPkts; 431 u64 ipOutMcastPkts; 432 u64 ipInBcastPkts; 433 u64 ipOutBcastPkts; 434 435 u64 tcpRtoAlgorithm; 436 u64 tcpRtoMin; 437 u64 tcpRtoMax; 438 u64 tcpMaxConn; 439 u64 tcpActiveOpens; 440 u64 tcpPassiveOpens; 441 u64 tcpAttemptFails; 442 u64 tcpEstabResets; 443 u64 tcpCurrEstab; 444 u64 tcpInSegs; 445 u64 tcpOutSegs; 446 u64 tcpRetransSegs; 447 u64 tcpInErrs; 448 u64 tcpOutRsts; 449 }; 450 451 union rdma_protocol_stats { 452 struct ib_protocol_stats ib; 453 struct iw_protocol_stats iw; 454 }; 455 456 /* Define bits for the various functionality this port needs to be supported by 457 * the core. 
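 *
 * As an illustration only (not a requirement of this header), a RoCE
 * driver's get_port_immutable() callback would typically end up doing
 * something like:
 *
 *	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
 *	immutable->pkey_tbl_len   = attr.pkey_tbl_len;
 *	immutable->gid_tbl_len    = attr.gid_tbl_len;
 *	immutable->max_mad_size   = IB_MGMT_MAD_SIZE;
 *
 * where attr is a struct ib_port_attr previously filled by ib_query_port()
 * and IB_MGMT_MAD_SIZE comes from rdma/ib_mad.h.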
458 */ 459 /* Management 0x00000FFF */ 460 #define RDMA_CORE_CAP_IB_MAD 0x00000001 461 #define RDMA_CORE_CAP_IB_SMI 0x00000002 462 #define RDMA_CORE_CAP_IB_CM 0x00000004 463 #define RDMA_CORE_CAP_IW_CM 0x00000008 464 #define RDMA_CORE_CAP_IB_SA 0x00000010 465 #define RDMA_CORE_CAP_OPA_MAD 0x00000020 466 467 /* Address format 0x000FF000 */ 468 #define RDMA_CORE_CAP_AF_IB 0x00001000 469 #define RDMA_CORE_CAP_ETH_AH 0x00002000 470 471 /* Protocol 0xFFF00000 */ 472 #define RDMA_CORE_CAP_PROT_IB 0x00100000 473 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 474 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 475 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 476 477 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 478 | RDMA_CORE_CAP_IB_MAD \ 479 | RDMA_CORE_CAP_IB_SMI \ 480 | RDMA_CORE_CAP_IB_CM \ 481 | RDMA_CORE_CAP_IB_SA \ 482 | RDMA_CORE_CAP_AF_IB) 483 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 484 | RDMA_CORE_CAP_IB_MAD \ 485 | RDMA_CORE_CAP_IB_CM \ 486 | RDMA_CORE_CAP_AF_IB \ 487 | RDMA_CORE_CAP_ETH_AH) 488 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ 489 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ 490 | RDMA_CORE_CAP_IB_MAD \ 491 | RDMA_CORE_CAP_IB_CM \ 492 | RDMA_CORE_CAP_AF_IB \ 493 | RDMA_CORE_CAP_ETH_AH) 494 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 495 | RDMA_CORE_CAP_IW_CM) 496 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 497 | RDMA_CORE_CAP_OPA_MAD) 498 499 struct ib_port_attr { 500 u64 subnet_prefix; 501 enum ib_port_state state; 502 enum ib_mtu max_mtu; 503 enum ib_mtu active_mtu; 504 int gid_tbl_len; 505 u32 port_cap_flags; 506 u32 max_msg_sz; 507 u32 bad_pkey_cntr; 508 u32 qkey_viol_cntr; 509 u16 pkey_tbl_len; 510 u16 lid; 511 u16 sm_lid; 512 u8 lmc; 513 u8 max_vl_num; 514 u8 sm_sl; 515 u8 subnet_timeout; 516 u8 init_type_reply; 517 u8 active_width; 518 u8 active_speed; 519 u8 phys_state; 520 bool grh_required; 521 }; 522 523 enum ib_device_modify_flags { 524 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 525 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 526 }; 527 528 struct ib_device_modify { 529 u64 sys_image_guid; 530 char node_desc[64]; 531 }; 532 533 enum ib_port_modify_flags { 534 IB_PORT_SHUTDOWN = 1, 535 IB_PORT_INIT_TYPE = (1<<2), 536 IB_PORT_RESET_QKEY_CNTR = (1<<3) 537 }; 538 539 struct ib_port_modify { 540 u32 set_port_cap_mask; 541 u32 clr_port_cap_mask; 542 u8 init_type; 543 }; 544 545 enum ib_event_type { 546 IB_EVENT_CQ_ERR, 547 IB_EVENT_QP_FATAL, 548 IB_EVENT_QP_REQ_ERR, 549 IB_EVENT_QP_ACCESS_ERR, 550 IB_EVENT_COMM_EST, 551 IB_EVENT_SQ_DRAINED, 552 IB_EVENT_PATH_MIG, 553 IB_EVENT_PATH_MIG_ERR, 554 IB_EVENT_DEVICE_FATAL, 555 IB_EVENT_PORT_ACTIVE, 556 IB_EVENT_PORT_ERR, 557 IB_EVENT_LID_CHANGE, 558 IB_EVENT_PKEY_CHANGE, 559 IB_EVENT_SM_CHANGE, 560 IB_EVENT_SRQ_ERR, 561 IB_EVENT_SRQ_LIMIT_REACHED, 562 IB_EVENT_QP_LAST_WQE_REACHED, 563 IB_EVENT_CLIENT_REREGISTER, 564 IB_EVENT_GID_CHANGE, 565 }; 566 567 const char *__attribute_const__ ib_event_msg(enum ib_event_type event); 568 569 struct ib_event { 570 struct ib_device *device; 571 union { 572 struct ib_cq *cq; 573 struct ib_qp *qp; 574 struct ib_srq *srq; 575 u8 port_num; 576 } element; 577 enum ib_event_type event; 578 }; 579 580 struct ib_event_handler { 581 struct ib_device *device; 582 void (*handler)(struct ib_event_handler *, struct ib_event *); 583 struct list_head list; 584 }; 585 586 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 587 do { \ 588 (_ptr)->device = _device; \ 589 (_ptr)->handler = _handler; \ 590 INIT_LIST_HEAD(&(_ptr)->list); \ 591 } while (0) 592 593 
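/*
 * Minimal usage sketch for the async event machinery above (illustrative
 * only; my_handler and my_event_handler are placeholders, not kernel
 * symbols):
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("%s: port %u became active\n",
 *				event->device->name, event->element.port_num);
 *	}
 *
 *	struct ib_event_handler my_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */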
struct ib_global_route { 594 union ib_gid dgid; 595 u32 flow_label; 596 u8 sgid_index; 597 u8 hop_limit; 598 u8 traffic_class; 599 }; 600 601 struct ib_grh { 602 __be32 version_tclass_flow; 603 __be16 paylen; 604 u8 next_hdr; 605 u8 hop_limit; 606 union ib_gid sgid; 607 union ib_gid dgid; 608 }; 609 610 union rdma_network_hdr { 611 struct ib_grh ibgrh; 612 struct { 613 /* The IB spec states that if it's IPv4, the header 614 * is located in the last 20 bytes of the header. 615 */ 616 u8 reserved[20]; 617 struct iphdr roce4grh; 618 }; 619 }; 620 621 enum { 622 IB_MULTICAST_QPN = 0xffffff 623 }; 624 625 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 626 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) 627 628 enum ib_ah_flags { 629 IB_AH_GRH = 1 630 }; 631 632 enum ib_rate { 633 IB_RATE_PORT_CURRENT = 0, 634 IB_RATE_2_5_GBPS = 2, 635 IB_RATE_5_GBPS = 5, 636 IB_RATE_10_GBPS = 3, 637 IB_RATE_20_GBPS = 6, 638 IB_RATE_30_GBPS = 4, 639 IB_RATE_40_GBPS = 7, 640 IB_RATE_60_GBPS = 8, 641 IB_RATE_80_GBPS = 9, 642 IB_RATE_120_GBPS = 10, 643 IB_RATE_14_GBPS = 11, 644 IB_RATE_56_GBPS = 12, 645 IB_RATE_112_GBPS = 13, 646 IB_RATE_168_GBPS = 14, 647 IB_RATE_25_GBPS = 15, 648 IB_RATE_100_GBPS = 16, 649 IB_RATE_200_GBPS = 17, 650 IB_RATE_300_GBPS = 18 651 }; 652 653 /** 654 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 655 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 656 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 657 * @rate: rate to convert. 658 */ 659 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 660 661 /** 662 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 663 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 664 * @rate: rate to convert. 665 */ 666 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 667 668 669 /** 670 * enum ib_mr_type - memory region type 671 * @IB_MR_TYPE_MEM_REG: memory region that is used for 672 * normal registration 673 * @IB_MR_TYPE_SIGNATURE: memory region that is used for 674 * signature operations (data-integrity 675 * capable regions) 676 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to 677 * register any arbitrary sg lists (without 678 * the normal mr constraints - see 679 * ib_map_mr_sg) 680 */ 681 enum ib_mr_type { 682 IB_MR_TYPE_MEM_REG, 683 IB_MR_TYPE_SIGNATURE, 684 IB_MR_TYPE_SG_GAPS, 685 }; 686 687 /** 688 * Signature types 689 * IB_SIG_TYPE_NONE: Unprotected. 690 * IB_SIG_TYPE_T10_DIF: Type T10-DIF 691 */ 692 enum ib_signature_type { 693 IB_SIG_TYPE_NONE, 694 IB_SIG_TYPE_T10_DIF, 695 }; 696 697 /** 698 * Signature T10-DIF block-guard types 699 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. 700 * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 701 */ 702 enum ib_t10_dif_bg_type { 703 IB_T10DIF_CRC, 704 IB_T10DIF_CSUM 705 }; 706 707 /** 708 * struct ib_t10_dif_domain - Parameters specific for T10-DIF 709 * domain. 710 * @bg_type: T10-DIF block guard type (CRC|CSUM) 711 * @pi_interval: protection information interval. 712 * @bg: seed of guard computation. 713 * @app_tag: application tag of guard block 714 * @ref_tag: initial guard block reference tag. 715 * @ref_remap: Indicate wethear the reftag increments each block 716 * @app_escape: Indicate to skip block check if apptag=0xffff 717 * @ref_escape: Indicate to skip block check if reftag=0xffffffff 718 * @apptag_check_mask: check bitmask of application tag. 
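 *
 * Illustrative sketch only (the values are arbitrary): a consumer of the
 * signature API typically embeds this in a struct ib_sig_attrs, e.g.
 *
 *	struct ib_sig_attrs sig_attrs = { };
 *
 *	sig_attrs.mem.sig_type             = IB_SIG_TYPE_NONE;
 *	sig_attrs.wire.sig_type            = IB_SIG_TYPE_T10_DIF;
 *	sig_attrs.wire.sig.dif.bg_type     = IB_T10DIF_CRC;
 *	sig_attrs.wire.sig.dif.pi_interval = 512;
 *	sig_attrs.wire.sig.dif.ref_tag     = 0x12345678;
 *	sig_attrs.wire.sig.dif.ref_remap   = true;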
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS	= 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
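 *
 * A consumer's completion handler can therefore treat both receive
 * opcodes uniformly, e.g. (sketch):
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		handle a receive (IB_WC_RECV or IB_WC_RECV_RDMA_WITH_IMM)
 *	else
 *		handle a send-side completion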
850 */ 851 IB_WC_RECV = 1 << 7, 852 IB_WC_RECV_RDMA_WITH_IMM 853 }; 854 855 enum ib_wc_flags { 856 IB_WC_GRH = 1, 857 IB_WC_WITH_IMM = (1<<1), 858 IB_WC_WITH_INVALIDATE = (1<<2), 859 IB_WC_IP_CSUM_OK = (1<<3), 860 IB_WC_WITH_SMAC = (1<<4), 861 IB_WC_WITH_VLAN = (1<<5), 862 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6), 863 }; 864 865 struct ib_wc { 866 union { 867 u64 wr_id; 868 struct ib_cqe *wr_cqe; 869 }; 870 enum ib_wc_status status; 871 enum ib_wc_opcode opcode; 872 u32 vendor_err; 873 u32 byte_len; 874 struct ib_qp *qp; 875 union { 876 __be32 imm_data; 877 u32 invalidate_rkey; 878 } ex; 879 u32 src_qp; 880 int wc_flags; 881 u16 pkey_index; 882 u16 slid; 883 u8 sl; 884 u8 dlid_path_bits; 885 u8 port_num; /* valid only for DR SMPs on switches */ 886 u8 smac[ETH_ALEN]; 887 u16 vlan_id; 888 u8 network_hdr_type; 889 }; 890 891 enum ib_cq_notify_flags { 892 IB_CQ_SOLICITED = 1 << 0, 893 IB_CQ_NEXT_COMP = 1 << 1, 894 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 895 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 896 }; 897 898 enum ib_srq_type { 899 IB_SRQT_BASIC, 900 IB_SRQT_XRC 901 }; 902 903 enum ib_srq_attr_mask { 904 IB_SRQ_MAX_WR = 1 << 0, 905 IB_SRQ_LIMIT = 1 << 1, 906 }; 907 908 struct ib_srq_attr { 909 u32 max_wr; 910 u32 max_sge; 911 u32 srq_limit; 912 }; 913 914 struct ib_srq_init_attr { 915 void (*event_handler)(struct ib_event *, void *); 916 void *srq_context; 917 struct ib_srq_attr attr; 918 enum ib_srq_type srq_type; 919 920 union { 921 struct { 922 struct ib_xrcd *xrcd; 923 struct ib_cq *cq; 924 } xrc; 925 } ext; 926 }; 927 928 struct ib_qp_cap { 929 u32 max_send_wr; 930 u32 max_recv_wr; 931 u32 max_send_sge; 932 u32 max_recv_sge; 933 u32 max_inline_data; 934 }; 935 936 enum ib_sig_type { 937 IB_SIGNAL_ALL_WR, 938 IB_SIGNAL_REQ_WR 939 }; 940 941 enum ib_qp_type { 942 /* 943 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 944 * here (and in that order) since the MAD layer uses them as 945 * indices into a 2-entry table. 946 */ 947 IB_QPT_SMI, 948 IB_QPT_GSI, 949 950 IB_QPT_RC, 951 IB_QPT_UC, 952 IB_QPT_UD, 953 IB_QPT_RAW_IPV6, 954 IB_QPT_RAW_ETHERTYPE, 955 IB_QPT_RAW_PACKET = 8, 956 IB_QPT_XRC_INI = 9, 957 IB_QPT_XRC_TGT, 958 IB_QPT_MAX, 959 /* Reserve a range for qp types internal to the low level driver. 960 * These qp types will not be visible at the IB core layer, so the 961 * IB_QPT_MAX usages should not be affected in the core layer 962 */ 963 IB_QPT_RESERVED1 = 0x1000, 964 IB_QPT_RESERVED2, 965 IB_QPT_RESERVED3, 966 IB_QPT_RESERVED4, 967 IB_QPT_RESERVED5, 968 IB_QPT_RESERVED6, 969 IB_QPT_RESERVED7, 970 IB_QPT_RESERVED8, 971 IB_QPT_RESERVED9, 972 IB_QPT_RESERVED10, 973 }; 974 975 enum ib_qp_create_flags { 976 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 977 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 978 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, 979 IB_QP_CREATE_MANAGED_SEND = 1 << 3, 980 IB_QP_CREATE_MANAGED_RECV = 1 << 4, 981 IB_QP_CREATE_NETIF_QP = 1 << 5, 982 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 983 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, 984 /* reserve bits 26-31 for low level drivers' internal use */ 985 IB_QP_CREATE_RESERVED_START = 1 << 26, 986 IB_QP_CREATE_RESERVED_END = 1 << 31, 987 }; 988 989 /* 990 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 991 * callback to destroy the passed in QP. 
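 *
 * One common (illustrative, not mandated) way to honour this rule is to
 * defer the teardown to process context, e.g.:
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct my_conn *conn = ctx;
 *
 *		if (event->event == IB_EVENT_QP_FATAL)
 *			queue_work(my_wq, &conn->teardown_work);
 *	}
 *
 * where the work handler, not this callback, eventually calls
 * ib_destroy_qp(). my_conn, my_wq and teardown_work are placeholders.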
992 */ 993 994 struct ib_qp_init_attr { 995 void (*event_handler)(struct ib_event *, void *); 996 void *qp_context; 997 struct ib_cq *send_cq; 998 struct ib_cq *recv_cq; 999 struct ib_srq *srq; 1000 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1001 struct ib_qp_cap cap; 1002 enum ib_sig_type sq_sig_type; 1003 enum ib_qp_type qp_type; 1004 enum ib_qp_create_flags create_flags; 1005 u8 port_num; /* special QP types only */ 1006 }; 1007 1008 struct ib_qp_open_attr { 1009 void (*event_handler)(struct ib_event *, void *); 1010 void *qp_context; 1011 u32 qp_num; 1012 enum ib_qp_type qp_type; 1013 }; 1014 1015 enum ib_rnr_timeout { 1016 IB_RNR_TIMER_655_36 = 0, 1017 IB_RNR_TIMER_000_01 = 1, 1018 IB_RNR_TIMER_000_02 = 2, 1019 IB_RNR_TIMER_000_03 = 3, 1020 IB_RNR_TIMER_000_04 = 4, 1021 IB_RNR_TIMER_000_06 = 5, 1022 IB_RNR_TIMER_000_08 = 6, 1023 IB_RNR_TIMER_000_12 = 7, 1024 IB_RNR_TIMER_000_16 = 8, 1025 IB_RNR_TIMER_000_24 = 9, 1026 IB_RNR_TIMER_000_32 = 10, 1027 IB_RNR_TIMER_000_48 = 11, 1028 IB_RNR_TIMER_000_64 = 12, 1029 IB_RNR_TIMER_000_96 = 13, 1030 IB_RNR_TIMER_001_28 = 14, 1031 IB_RNR_TIMER_001_92 = 15, 1032 IB_RNR_TIMER_002_56 = 16, 1033 IB_RNR_TIMER_003_84 = 17, 1034 IB_RNR_TIMER_005_12 = 18, 1035 IB_RNR_TIMER_007_68 = 19, 1036 IB_RNR_TIMER_010_24 = 20, 1037 IB_RNR_TIMER_015_36 = 21, 1038 IB_RNR_TIMER_020_48 = 22, 1039 IB_RNR_TIMER_030_72 = 23, 1040 IB_RNR_TIMER_040_96 = 24, 1041 IB_RNR_TIMER_061_44 = 25, 1042 IB_RNR_TIMER_081_92 = 26, 1043 IB_RNR_TIMER_122_88 = 27, 1044 IB_RNR_TIMER_163_84 = 28, 1045 IB_RNR_TIMER_245_76 = 29, 1046 IB_RNR_TIMER_327_68 = 30, 1047 IB_RNR_TIMER_491_52 = 31 1048 }; 1049 1050 enum ib_qp_attr_mask { 1051 IB_QP_STATE = 1, 1052 IB_QP_CUR_STATE = (1<<1), 1053 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 1054 IB_QP_ACCESS_FLAGS = (1<<3), 1055 IB_QP_PKEY_INDEX = (1<<4), 1056 IB_QP_PORT = (1<<5), 1057 IB_QP_QKEY = (1<<6), 1058 IB_QP_AV = (1<<7), 1059 IB_QP_PATH_MTU = (1<<8), 1060 IB_QP_TIMEOUT = (1<<9), 1061 IB_QP_RETRY_CNT = (1<<10), 1062 IB_QP_RNR_RETRY = (1<<11), 1063 IB_QP_RQ_PSN = (1<<12), 1064 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 1065 IB_QP_ALT_PATH = (1<<14), 1066 IB_QP_MIN_RNR_TIMER = (1<<15), 1067 IB_QP_SQ_PSN = (1<<16), 1068 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 1069 IB_QP_PATH_MIG_STATE = (1<<18), 1070 IB_QP_CAP = (1<<19), 1071 IB_QP_DEST_QPN = (1<<20), 1072 IB_QP_RESERVED1 = (1<<21), 1073 IB_QP_RESERVED2 = (1<<22), 1074 IB_QP_RESERVED3 = (1<<23), 1075 IB_QP_RESERVED4 = (1<<24), 1076 }; 1077 1078 enum ib_qp_state { 1079 IB_QPS_RESET, 1080 IB_QPS_INIT, 1081 IB_QPS_RTR, 1082 IB_QPS_RTS, 1083 IB_QPS_SQD, 1084 IB_QPS_SQE, 1085 IB_QPS_ERR 1086 }; 1087 1088 enum ib_mig_state { 1089 IB_MIG_MIGRATED, 1090 IB_MIG_REARM, 1091 IB_MIG_ARMED 1092 }; 1093 1094 enum ib_mw_type { 1095 IB_MW_TYPE_1 = 1, 1096 IB_MW_TYPE_2 = 2 1097 }; 1098 1099 struct ib_qp_attr { 1100 enum ib_qp_state qp_state; 1101 enum ib_qp_state cur_qp_state; 1102 enum ib_mtu path_mtu; 1103 enum ib_mig_state path_mig_state; 1104 u32 qkey; 1105 u32 rq_psn; 1106 u32 sq_psn; 1107 u32 dest_qp_num; 1108 int qp_access_flags; 1109 struct ib_qp_cap cap; 1110 struct ib_ah_attr ah_attr; 1111 struct ib_ah_attr alt_ah_attr; 1112 u16 pkey_index; 1113 u16 alt_pkey_index; 1114 u8 en_sqd_async_notify; 1115 u8 sq_draining; 1116 u8 max_rd_atomic; 1117 u8 max_dest_rd_atomic; 1118 u8 min_rnr_timer; 1119 u8 port_num; 1120 u8 timeout; 1121 u8 retry_cnt; 1122 u8 rnr_retry; 1123 u8 alt_port_num; 1124 u8 alt_timeout; 1125 }; 1126 1127 enum ib_wr_opcode { 1128 IB_WR_RDMA_WRITE, 1129 IB_WR_RDMA_WRITE_WITH_IMM, 1130 IB_WR_SEND, 1131 
IB_WR_SEND_WITH_IMM, 1132 IB_WR_RDMA_READ, 1133 IB_WR_ATOMIC_CMP_AND_SWP, 1134 IB_WR_ATOMIC_FETCH_AND_ADD, 1135 IB_WR_LSO, 1136 IB_WR_SEND_WITH_INV, 1137 IB_WR_RDMA_READ_WITH_INV, 1138 IB_WR_LOCAL_INV, 1139 IB_WR_REG_MR, 1140 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1141 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1142 IB_WR_REG_SIG_MR, 1143 /* reserve values for low level drivers' internal use. 1144 * These values will not be used at all in the ib core layer. 1145 */ 1146 IB_WR_RESERVED1 = 0xf0, 1147 IB_WR_RESERVED2, 1148 IB_WR_RESERVED3, 1149 IB_WR_RESERVED4, 1150 IB_WR_RESERVED5, 1151 IB_WR_RESERVED6, 1152 IB_WR_RESERVED7, 1153 IB_WR_RESERVED8, 1154 IB_WR_RESERVED9, 1155 IB_WR_RESERVED10, 1156 }; 1157 1158 enum ib_send_flags { 1159 IB_SEND_FENCE = 1, 1160 IB_SEND_SIGNALED = (1<<1), 1161 IB_SEND_SOLICITED = (1<<2), 1162 IB_SEND_INLINE = (1<<3), 1163 IB_SEND_IP_CSUM = (1<<4), 1164 1165 /* reserve bits 26-31 for low level drivers' internal use */ 1166 IB_SEND_RESERVED_START = (1 << 26), 1167 IB_SEND_RESERVED_END = (1 << 31), 1168 }; 1169 1170 struct ib_sge { 1171 u64 addr; 1172 u32 length; 1173 u32 lkey; 1174 }; 1175 1176 struct ib_cqe { 1177 void (*done)(struct ib_cq *cq, struct ib_wc *wc); 1178 }; 1179 1180 struct ib_send_wr { 1181 struct ib_send_wr *next; 1182 union { 1183 u64 wr_id; 1184 struct ib_cqe *wr_cqe; 1185 }; 1186 struct ib_sge *sg_list; 1187 int num_sge; 1188 enum ib_wr_opcode opcode; 1189 int send_flags; 1190 union { 1191 __be32 imm_data; 1192 u32 invalidate_rkey; 1193 } ex; 1194 }; 1195 1196 struct ib_rdma_wr { 1197 struct ib_send_wr wr; 1198 u64 remote_addr; 1199 u32 rkey; 1200 }; 1201 1202 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) 1203 { 1204 return container_of(wr, struct ib_rdma_wr, wr); 1205 } 1206 1207 struct ib_atomic_wr { 1208 struct ib_send_wr wr; 1209 u64 remote_addr; 1210 u64 compare_add; 1211 u64 swap; 1212 u64 compare_add_mask; 1213 u64 swap_mask; 1214 u32 rkey; 1215 }; 1216 1217 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) 1218 { 1219 return container_of(wr, struct ib_atomic_wr, wr); 1220 } 1221 1222 struct ib_ud_wr { 1223 struct ib_send_wr wr; 1224 struct ib_ah *ah; 1225 void *header; 1226 int hlen; 1227 int mss; 1228 u32 remote_qpn; 1229 u32 remote_qkey; 1230 u16 pkey_index; /* valid for GSI only */ 1231 u8 port_num; /* valid for DR SMPs on switch only */ 1232 }; 1233 1234 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) 1235 { 1236 return container_of(wr, struct ib_ud_wr, wr); 1237 } 1238 1239 struct ib_reg_wr { 1240 struct ib_send_wr wr; 1241 struct ib_mr *mr; 1242 u32 key; 1243 int access; 1244 }; 1245 1246 static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr) 1247 { 1248 return container_of(wr, struct ib_reg_wr, wr); 1249 } 1250 1251 struct ib_sig_handover_wr { 1252 struct ib_send_wr wr; 1253 struct ib_sig_attrs *sig_attrs; 1254 struct ib_mr *sig_mr; 1255 int access_flags; 1256 struct ib_sge *prot; 1257 }; 1258 1259 static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) 1260 { 1261 return container_of(wr, struct ib_sig_handover_wr, wr); 1262 } 1263 1264 struct ib_recv_wr { 1265 struct ib_recv_wr *next; 1266 union { 1267 u64 wr_id; 1268 struct ib_cqe *wr_cqe; 1269 }; 1270 struct ib_sge *sg_list; 1271 int num_sge; 1272 }; 1273 1274 enum ib_access_flags { 1275 IB_ACCESS_LOCAL_WRITE = 1, 1276 IB_ACCESS_REMOTE_WRITE = (1<<1), 1277 IB_ACCESS_REMOTE_READ = (1<<2), 1278 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 1279 IB_ACCESS_MW_BIND = (1<<4), 1280 IB_ZERO_BASED = (1<<5), 1281 IB_ACCESS_ON_DEMAND = 
(1<<6), 1282 }; 1283 1284 /* 1285 * XXX: these are apparently used for ->rereg_user_mr, no idea why they 1286 * are hidden here instead of a uapi header! 1287 */ 1288 enum ib_mr_rereg_flags { 1289 IB_MR_REREG_TRANS = 1, 1290 IB_MR_REREG_PD = (1<<1), 1291 IB_MR_REREG_ACCESS = (1<<2), 1292 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1293 }; 1294 1295 struct ib_fmr_attr { 1296 int max_pages; 1297 int max_maps; 1298 u8 page_shift; 1299 }; 1300 1301 struct ib_umem; 1302 1303 struct ib_ucontext { 1304 struct ib_device *device; 1305 struct list_head pd_list; 1306 struct list_head mr_list; 1307 struct list_head mw_list; 1308 struct list_head cq_list; 1309 struct list_head qp_list; 1310 struct list_head srq_list; 1311 struct list_head ah_list; 1312 struct list_head xrcd_list; 1313 struct list_head rule_list; 1314 int closing; 1315 1316 struct pid *tgid; 1317 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1318 struct rb_root umem_tree; 1319 /* 1320 * Protects .umem_rbroot and tree, as well as odp_mrs_count and 1321 * mmu notifiers registration. 1322 */ 1323 struct rw_semaphore umem_rwsem; 1324 void (*invalidate_range)(struct ib_umem *umem, 1325 unsigned long start, unsigned long end); 1326 1327 struct mmu_notifier mn; 1328 atomic_t notifier_count; 1329 /* A list of umems that don't have private mmu notifier counters yet. */ 1330 struct list_head no_private_counters; 1331 int odp_mrs_count; 1332 #endif 1333 }; 1334 1335 struct ib_uobject { 1336 u64 user_handle; /* handle given to us by userspace */ 1337 struct ib_ucontext *context; /* associated user context */ 1338 void *object; /* containing object */ 1339 struct list_head list; /* link to context's list */ 1340 int id; /* index into kernel idr */ 1341 struct kref ref; 1342 struct rw_semaphore mutex; /* protects .live */ 1343 struct rcu_head rcu; /* kfree_rcu() overhead */ 1344 int live; 1345 }; 1346 1347 struct ib_udata { 1348 const void __user *inbuf; 1349 void __user *outbuf; 1350 size_t inlen; 1351 size_t outlen; 1352 }; 1353 1354 struct ib_pd { 1355 u32 local_dma_lkey; 1356 struct ib_device *device; 1357 struct ib_uobject *uobject; 1358 atomic_t usecnt; /* count all resources */ 1359 struct ib_mr *local_mr; 1360 }; 1361 1362 struct ib_xrcd { 1363 struct ib_device *device; 1364 atomic_t usecnt; /* count all exposed resources */ 1365 struct inode *inode; 1366 1367 struct mutex tgt_qp_mutex; 1368 struct list_head tgt_qp_list; 1369 }; 1370 1371 struct ib_ah { 1372 struct ib_device *device; 1373 struct ib_pd *pd; 1374 struct ib_uobject *uobject; 1375 }; 1376 1377 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1378 1379 enum ib_poll_context { 1380 IB_POLL_DIRECT, /* caller context, no hw completions */ 1381 IB_POLL_SOFTIRQ, /* poll from softirq context */ 1382 IB_POLL_WORKQUEUE, /* poll from workqueue */ 1383 }; 1384 1385 struct ib_cq { 1386 struct ib_device *device; 1387 struct ib_uobject *uobject; 1388 ib_comp_handler comp_handler; 1389 void (*event_handler)(struct ib_event *, void *); 1390 void *cq_context; 1391 int cqe; 1392 atomic_t usecnt; /* count number of work queues */ 1393 enum ib_poll_context poll_ctx; 1394 struct ib_wc *wc; 1395 union { 1396 struct irq_poll iop; 1397 struct work_struct work; 1398 }; 1399 }; 1400 1401 struct ib_srq { 1402 struct ib_device *device; 1403 struct ib_pd *pd; 1404 struct ib_uobject *uobject; 1405 void (*event_handler)(struct ib_event *, void *); 1406 void *srq_context; 1407 enum ib_srq_type srq_type; 1408 atomic_t usecnt; 1409 1410 union { 1411 struct { 1412 struct ib_xrcd *xrcd; 
1413 struct ib_cq *cq; 1414 u32 srq_num; 1415 } xrc; 1416 } ext; 1417 }; 1418 1419 struct ib_qp { 1420 struct ib_device *device; 1421 struct ib_pd *pd; 1422 struct ib_cq *send_cq; 1423 struct ib_cq *recv_cq; 1424 struct ib_srq *srq; 1425 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1426 struct list_head xrcd_list; 1427 /* count times opened, mcast attaches, flow attaches */ 1428 atomic_t usecnt; 1429 struct list_head open_list; 1430 struct ib_qp *real_qp; 1431 struct ib_uobject *uobject; 1432 void (*event_handler)(struct ib_event *, void *); 1433 void *qp_context; 1434 u32 qp_num; 1435 enum ib_qp_type qp_type; 1436 }; 1437 1438 struct ib_mr { 1439 struct ib_device *device; 1440 struct ib_pd *pd; 1441 struct ib_uobject *uobject; 1442 u32 lkey; 1443 u32 rkey; 1444 u64 iova; 1445 u32 length; 1446 unsigned int page_size; 1447 }; 1448 1449 struct ib_mw { 1450 struct ib_device *device; 1451 struct ib_pd *pd; 1452 struct ib_uobject *uobject; 1453 u32 rkey; 1454 enum ib_mw_type type; 1455 }; 1456 1457 struct ib_fmr { 1458 struct ib_device *device; 1459 struct ib_pd *pd; 1460 struct list_head list; 1461 u32 lkey; 1462 u32 rkey; 1463 }; 1464 1465 /* Supported steering options */ 1466 enum ib_flow_attr_type { 1467 /* steering according to rule specifications */ 1468 IB_FLOW_ATTR_NORMAL = 0x0, 1469 /* default unicast and multicast rule - 1470 * receive all Eth traffic which isn't steered to any QP 1471 */ 1472 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1473 /* default multicast rule - 1474 * receive all Eth multicast traffic which isn't steered to any QP 1475 */ 1476 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1477 /* sniffer rule - receive all port traffic */ 1478 IB_FLOW_ATTR_SNIFFER = 0x3 1479 }; 1480 1481 /* Supported steering header types */ 1482 enum ib_flow_spec_type { 1483 /* L2 headers*/ 1484 IB_FLOW_SPEC_ETH = 0x20, 1485 IB_FLOW_SPEC_IB = 0x22, 1486 /* L3 header*/ 1487 IB_FLOW_SPEC_IPV4 = 0x30, 1488 /* L4 headers*/ 1489 IB_FLOW_SPEC_TCP = 0x40, 1490 IB_FLOW_SPEC_UDP = 0x41 1491 }; 1492 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1493 #define IB_FLOW_SPEC_SUPPORT_LAYERS 4 1494 1495 /* Flow steering rule priority is set according to it's domain. 1496 * Lower domain value means higher priority. 
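 *
 * Illustrative sketch (not a complete rule): a kernel consumer building a
 * sniffer rule fills a struct ib_flow_attr, optionally followed in the same
 * buffer by ib_flow_spec_xxx entries, and hands it to the device together
 * with a domain, e.g.
 *
 *	struct ib_flow_attr attr = {
 *		.type	= IB_FLOW_ATTR_SNIFFER,
 *		.size	= sizeof(attr),
 *		.port	= 1,
 *	};
 *	struct ib_flow *flow = ib_create_flow(qp, &attr, IB_FLOW_DOMAIN_USER);
 *
 * where qp is the consumer's receive QP.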
1497 */ 1498 enum ib_flow_domain { 1499 IB_FLOW_DOMAIN_USER, 1500 IB_FLOW_DOMAIN_ETHTOOL, 1501 IB_FLOW_DOMAIN_RFS, 1502 IB_FLOW_DOMAIN_NIC, 1503 IB_FLOW_DOMAIN_NUM /* Must be last */ 1504 }; 1505 1506 enum ib_flow_flags { 1507 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ 1508 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ 1509 }; 1510 1511 struct ib_flow_eth_filter { 1512 u8 dst_mac[6]; 1513 u8 src_mac[6]; 1514 __be16 ether_type; 1515 __be16 vlan_tag; 1516 }; 1517 1518 struct ib_flow_spec_eth { 1519 enum ib_flow_spec_type type; 1520 u16 size; 1521 struct ib_flow_eth_filter val; 1522 struct ib_flow_eth_filter mask; 1523 }; 1524 1525 struct ib_flow_ib_filter { 1526 __be16 dlid; 1527 __u8 sl; 1528 }; 1529 1530 struct ib_flow_spec_ib { 1531 enum ib_flow_spec_type type; 1532 u16 size; 1533 struct ib_flow_ib_filter val; 1534 struct ib_flow_ib_filter mask; 1535 }; 1536 1537 struct ib_flow_ipv4_filter { 1538 __be32 src_ip; 1539 __be32 dst_ip; 1540 }; 1541 1542 struct ib_flow_spec_ipv4 { 1543 enum ib_flow_spec_type type; 1544 u16 size; 1545 struct ib_flow_ipv4_filter val; 1546 struct ib_flow_ipv4_filter mask; 1547 }; 1548 1549 struct ib_flow_tcp_udp_filter { 1550 __be16 dst_port; 1551 __be16 src_port; 1552 }; 1553 1554 struct ib_flow_spec_tcp_udp { 1555 enum ib_flow_spec_type type; 1556 u16 size; 1557 struct ib_flow_tcp_udp_filter val; 1558 struct ib_flow_tcp_udp_filter mask; 1559 }; 1560 1561 union ib_flow_spec { 1562 struct { 1563 enum ib_flow_spec_type type; 1564 u16 size; 1565 }; 1566 struct ib_flow_spec_eth eth; 1567 struct ib_flow_spec_ib ib; 1568 struct ib_flow_spec_ipv4 ipv4; 1569 struct ib_flow_spec_tcp_udp tcp_udp; 1570 }; 1571 1572 struct ib_flow_attr { 1573 enum ib_flow_attr_type type; 1574 u16 size; 1575 u16 priority; 1576 u32 flags; 1577 u8 num_of_specs; 1578 u8 port; 1579 /* Following are the optional layers according to user request 1580 * struct ib_flow_spec_xxx 1581 * struct ib_flow_spec_yyy 1582 */ 1583 }; 1584 1585 struct ib_flow { 1586 struct ib_qp *qp; 1587 struct ib_uobject *uobject; 1588 }; 1589 1590 struct ib_mad_hdr; 1591 struct ib_grh; 1592 1593 enum ib_process_mad_flags { 1594 IB_MAD_IGNORE_MKEY = 1, 1595 IB_MAD_IGNORE_BKEY = 2, 1596 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 1597 }; 1598 1599 enum ib_mad_result { 1600 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 1601 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 1602 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 1603 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 1604 }; 1605 1606 #define IB_DEVICE_NAME_MAX 64 1607 1608 struct ib_cache { 1609 rwlock_t lock; 1610 struct ib_event_handler event_handler; 1611 struct ib_pkey_cache **pkey_cache; 1612 struct ib_gid_table **gid_cache; 1613 u8 *lmc_cache; 1614 }; 1615 1616 struct ib_dma_mapping_ops { 1617 int (*mapping_error)(struct ib_device *dev, 1618 u64 dma_addr); 1619 u64 (*map_single)(struct ib_device *dev, 1620 void *ptr, size_t size, 1621 enum dma_data_direction direction); 1622 void (*unmap_single)(struct ib_device *dev, 1623 u64 addr, size_t size, 1624 enum dma_data_direction direction); 1625 u64 (*map_page)(struct ib_device *dev, 1626 struct page *page, unsigned long offset, 1627 size_t size, 1628 enum dma_data_direction direction); 1629 void (*unmap_page)(struct ib_device *dev, 1630 u64 addr, size_t size, 1631 enum dma_data_direction direction); 1632 int (*map_sg)(struct ib_device *dev, 1633 struct scatterlist *sg, int nents, 
1634 enum dma_data_direction direction); 1635 void (*unmap_sg)(struct ib_device *dev, 1636 struct scatterlist *sg, int nents, 1637 enum dma_data_direction direction); 1638 void (*sync_single_for_cpu)(struct ib_device *dev, 1639 u64 dma_handle, 1640 size_t size, 1641 enum dma_data_direction dir); 1642 void (*sync_single_for_device)(struct ib_device *dev, 1643 u64 dma_handle, 1644 size_t size, 1645 enum dma_data_direction dir); 1646 void *(*alloc_coherent)(struct ib_device *dev, 1647 size_t size, 1648 u64 *dma_handle, 1649 gfp_t flag); 1650 void (*free_coherent)(struct ib_device *dev, 1651 size_t size, void *cpu_addr, 1652 u64 dma_handle); 1653 }; 1654 1655 struct iw_cm_verbs; 1656 1657 struct ib_port_immutable { 1658 int pkey_tbl_len; 1659 int gid_tbl_len; 1660 u32 core_cap_flags; 1661 u32 max_mad_size; 1662 }; 1663 1664 struct ib_device { 1665 struct device *dma_device; 1666 1667 char name[IB_DEVICE_NAME_MAX]; 1668 1669 struct list_head event_handler_list; 1670 spinlock_t event_handler_lock; 1671 1672 spinlock_t client_data_lock; 1673 struct list_head core_list; 1674 /* Access to the client_data_list is protected by the client_data_lock 1675 * spinlock and the lists_rwsem read-write semaphore */ 1676 struct list_head client_data_list; 1677 1678 struct ib_cache cache; 1679 /** 1680 * port_immutable is indexed by port number 1681 */ 1682 struct ib_port_immutable *port_immutable; 1683 1684 int num_comp_vectors; 1685 1686 struct iw_cm_verbs *iwcm; 1687 1688 int (*get_protocol_stats)(struct ib_device *device, 1689 union rdma_protocol_stats *stats); 1690 int (*query_device)(struct ib_device *device, 1691 struct ib_device_attr *device_attr, 1692 struct ib_udata *udata); 1693 int (*query_port)(struct ib_device *device, 1694 u8 port_num, 1695 struct ib_port_attr *port_attr); 1696 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 1697 u8 port_num); 1698 /* When calling get_netdev, the HW vendor's driver should return the 1699 * net device of device @device at port @port_num or NULL if such 1700 * a net device doesn't exist. The vendor driver should call dev_hold 1701 * on this net device. The HW vendor's device driver must guarantee 1702 * that this function returns NULL before the net device reaches 1703 * NETDEV_UNREGISTER_FINAL state. 1704 */ 1705 struct net_device *(*get_netdev)(struct ib_device *device, 1706 u8 port_num); 1707 int (*query_gid)(struct ib_device *device, 1708 u8 port_num, int index, 1709 union ib_gid *gid); 1710 /* When calling add_gid, the HW vendor's driver should 1711 * add the gid of device @device at gid index @index of 1712 * port @port_num to be @gid. Meta-info of that gid (for example, 1713 * the network device related to this gid is available 1714 * at @attr. @context allows the HW vendor driver to store extra 1715 * information together with a GID entry. The HW vendor may allocate 1716 * memory to contain this information and store it in @context when a 1717 * new GID entry is written to. Params are consistent until the next 1718 * call of add_gid or delete_gid. The function should return 0 on 1719 * success or error otherwise. The function could be called 1720 * concurrently for different ports. This function is only called 1721 * when roce_gid_table is used. 
1722 */ 1723 int (*add_gid)(struct ib_device *device, 1724 u8 port_num, 1725 unsigned int index, 1726 const union ib_gid *gid, 1727 const struct ib_gid_attr *attr, 1728 void **context); 1729 /* When calling del_gid, the HW vendor's driver should delete the 1730 * gid of device @device at gid index @index of port @port_num. 1731 * Upon the deletion of a GID entry, the HW vendor must free any 1732 * allocated memory. The caller will clear @context afterwards. 1733 * This function is only called when roce_gid_table is used. 1734 */ 1735 int (*del_gid)(struct ib_device *device, 1736 u8 port_num, 1737 unsigned int index, 1738 void **context); 1739 int (*query_pkey)(struct ib_device *device, 1740 u8 port_num, u16 index, u16 *pkey); 1741 int (*modify_device)(struct ib_device *device, 1742 int device_modify_mask, 1743 struct ib_device_modify *device_modify); 1744 int (*modify_port)(struct ib_device *device, 1745 u8 port_num, int port_modify_mask, 1746 struct ib_port_modify *port_modify); 1747 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 1748 struct ib_udata *udata); 1749 int (*dealloc_ucontext)(struct ib_ucontext *context); 1750 int (*mmap)(struct ib_ucontext *context, 1751 struct vm_area_struct *vma); 1752 struct ib_pd * (*alloc_pd)(struct ib_device *device, 1753 struct ib_ucontext *context, 1754 struct ib_udata *udata); 1755 int (*dealloc_pd)(struct ib_pd *pd); 1756 struct ib_ah * (*create_ah)(struct ib_pd *pd, 1757 struct ib_ah_attr *ah_attr); 1758 int (*modify_ah)(struct ib_ah *ah, 1759 struct ib_ah_attr *ah_attr); 1760 int (*query_ah)(struct ib_ah *ah, 1761 struct ib_ah_attr *ah_attr); 1762 int (*destroy_ah)(struct ib_ah *ah); 1763 struct ib_srq * (*create_srq)(struct ib_pd *pd, 1764 struct ib_srq_init_attr *srq_init_attr, 1765 struct ib_udata *udata); 1766 int (*modify_srq)(struct ib_srq *srq, 1767 struct ib_srq_attr *srq_attr, 1768 enum ib_srq_attr_mask srq_attr_mask, 1769 struct ib_udata *udata); 1770 int (*query_srq)(struct ib_srq *srq, 1771 struct ib_srq_attr *srq_attr); 1772 int (*destroy_srq)(struct ib_srq *srq); 1773 int (*post_srq_recv)(struct ib_srq *srq, 1774 struct ib_recv_wr *recv_wr, 1775 struct ib_recv_wr **bad_recv_wr); 1776 struct ib_qp * (*create_qp)(struct ib_pd *pd, 1777 struct ib_qp_init_attr *qp_init_attr, 1778 struct ib_udata *udata); 1779 int (*modify_qp)(struct ib_qp *qp, 1780 struct ib_qp_attr *qp_attr, 1781 int qp_attr_mask, 1782 struct ib_udata *udata); 1783 int (*query_qp)(struct ib_qp *qp, 1784 struct ib_qp_attr *qp_attr, 1785 int qp_attr_mask, 1786 struct ib_qp_init_attr *qp_init_attr); 1787 int (*destroy_qp)(struct ib_qp *qp); 1788 int (*post_send)(struct ib_qp *qp, 1789 struct ib_send_wr *send_wr, 1790 struct ib_send_wr **bad_send_wr); 1791 int (*post_recv)(struct ib_qp *qp, 1792 struct ib_recv_wr *recv_wr, 1793 struct ib_recv_wr **bad_recv_wr); 1794 struct ib_cq * (*create_cq)(struct ib_device *device, 1795 const struct ib_cq_init_attr *attr, 1796 struct ib_ucontext *context, 1797 struct ib_udata *udata); 1798 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 1799 u16 cq_period); 1800 int (*destroy_cq)(struct ib_cq *cq); 1801 int (*resize_cq)(struct ib_cq *cq, int cqe, 1802 struct ib_udata *udata); 1803 int (*poll_cq)(struct ib_cq *cq, int num_entries, 1804 struct ib_wc *wc); 1805 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 1806 int (*req_notify_cq)(struct ib_cq *cq, 1807 enum ib_cq_notify_flags flags); 1808 int (*req_ncomp_notif)(struct ib_cq *cq, 1809 int wc_cnt); 1810 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 1811 int 
mr_access_flags); 1812 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 1813 u64 start, u64 length, 1814 u64 virt_addr, 1815 int mr_access_flags, 1816 struct ib_udata *udata); 1817 int (*rereg_user_mr)(struct ib_mr *mr, 1818 int flags, 1819 u64 start, u64 length, 1820 u64 virt_addr, 1821 int mr_access_flags, 1822 struct ib_pd *pd, 1823 struct ib_udata *udata); 1824 int (*dereg_mr)(struct ib_mr *mr); 1825 struct ib_mr * (*alloc_mr)(struct ib_pd *pd, 1826 enum ib_mr_type mr_type, 1827 u32 max_num_sg); 1828 int (*map_mr_sg)(struct ib_mr *mr, 1829 struct scatterlist *sg, 1830 int sg_nents); 1831 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 1832 enum ib_mw_type type, 1833 struct ib_udata *udata); 1834 int (*dealloc_mw)(struct ib_mw *mw); 1835 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 1836 int mr_access_flags, 1837 struct ib_fmr_attr *fmr_attr); 1838 int (*map_phys_fmr)(struct ib_fmr *fmr, 1839 u64 *page_list, int list_len, 1840 u64 iova); 1841 int (*unmap_fmr)(struct list_head *fmr_list); 1842 int (*dealloc_fmr)(struct ib_fmr *fmr); 1843 int (*attach_mcast)(struct ib_qp *qp, 1844 union ib_gid *gid, 1845 u16 lid); 1846 int (*detach_mcast)(struct ib_qp *qp, 1847 union ib_gid *gid, 1848 u16 lid); 1849 int (*process_mad)(struct ib_device *device, 1850 int process_mad_flags, 1851 u8 port_num, 1852 const struct ib_wc *in_wc, 1853 const struct ib_grh *in_grh, 1854 const struct ib_mad_hdr *in_mad, 1855 size_t in_mad_size, 1856 struct ib_mad_hdr *out_mad, 1857 size_t *out_mad_size, 1858 u16 *out_mad_pkey_index); 1859 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 1860 struct ib_ucontext *ucontext, 1861 struct ib_udata *udata); 1862 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 1863 struct ib_flow * (*create_flow)(struct ib_qp *qp, 1864 struct ib_flow_attr 1865 *flow_attr, 1866 int domain); 1867 int (*destroy_flow)(struct ib_flow *flow_id); 1868 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 1869 struct ib_mr_status *mr_status); 1870 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 1871 void (*drain_rq)(struct ib_qp *qp); 1872 void (*drain_sq)(struct ib_qp *qp); 1873 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 1874 int state); 1875 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 1876 struct ifla_vf_info *ivf); 1877 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 1878 struct ifla_vf_stats *stats); 1879 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 1880 int type); 1881 1882 struct ib_dma_mapping_ops *dma_ops; 1883 1884 struct module *owner; 1885 struct device dev; 1886 struct kobject *ports_parent; 1887 struct list_head port_list; 1888 1889 enum { 1890 IB_DEV_UNINITIALIZED, 1891 IB_DEV_REGISTERED, 1892 IB_DEV_UNREGISTERED 1893 } reg_state; 1894 1895 int uverbs_abi_ver; 1896 u64 uverbs_cmd_mask; 1897 u64 uverbs_ex_cmd_mask; 1898 1899 char node_desc[64]; 1900 __be64 node_guid; 1901 u32 local_dma_lkey; 1902 u16 is_switch:1; 1903 u8 node_type; 1904 u8 phys_port_cnt; 1905 struct ib_device_attr attrs; 1906 1907 /** 1908 * The following mandatory functions are used only at device 1909 * registration. Keep functions such as these at the end of this 1910 * structure to avoid cache line misses when accessing struct ib_device 1911 * in fast paths. 
1912 */ 1913 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 1914 }; 1915 1916 struct ib_client { 1917 char *name; 1918 void (*add) (struct ib_device *); 1919 void (*remove)(struct ib_device *, void *client_data); 1920 1921 /* Returns the net_dev belonging to this ib_client and matching the 1922 * given parameters. 1923 * @dev: An RDMA device that the net_dev use for communication. 1924 * @port: A physical port number on the RDMA device. 1925 * @pkey: P_Key that the net_dev uses if applicable. 1926 * @gid: A GID that the net_dev uses to communicate. 1927 * @addr: An IP address the net_dev is configured with. 1928 * @client_data: The device's client data set by ib_set_client_data(). 1929 * 1930 * An ib_client that implements a net_dev on top of RDMA devices 1931 * (such as IP over IB) should implement this callback, allowing the 1932 * rdma_cm module to find the right net_dev for a given request. 1933 * 1934 * The caller is responsible for calling dev_put on the returned 1935 * netdev. */ 1936 struct net_device *(*get_net_dev_by_params)( 1937 struct ib_device *dev, 1938 u8 port, 1939 u16 pkey, 1940 const union ib_gid *gid, 1941 const struct sockaddr *addr, 1942 void *client_data); 1943 struct list_head list; 1944 }; 1945 1946 struct ib_device *ib_alloc_device(size_t size); 1947 void ib_dealloc_device(struct ib_device *device); 1948 1949 int ib_register_device(struct ib_device *device, 1950 int (*port_callback)(struct ib_device *, 1951 u8, struct kobject *)); 1952 void ib_unregister_device(struct ib_device *device); 1953 1954 int ib_register_client (struct ib_client *client); 1955 void ib_unregister_client(struct ib_client *client); 1956 1957 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 1958 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 1959 void *data); 1960 1961 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 1962 { 1963 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 1964 } 1965 1966 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 1967 { 1968 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 1969 } 1970 1971 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 1972 size_t offset, 1973 size_t len) 1974 { 1975 const void __user *p = udata->inbuf + offset; 1976 bool ret = false; 1977 u8 *buf; 1978 1979 if (len > USHRT_MAX) 1980 return false; 1981 1982 buf = kmalloc(len, GFP_KERNEL); 1983 if (!buf) 1984 return false; 1985 1986 if (copy_from_user(buf, p, len)) 1987 goto free; 1988 1989 ret = !memchr_inv(buf, 0, len); 1990 1991 free: 1992 kfree(buf); 1993 return ret; 1994 } 1995 1996 /** 1997 * ib_modify_qp_is_ok - Check that the supplied attribute mask 1998 * contains all required attributes and no attributes not allowed for 1999 * the given QP state transition. 2000 * @cur_state: Current QP state 2001 * @next_state: Next QP state 2002 * @type: QP type 2003 * @mask: Mask of supplied QP attributes 2004 * @ll : link layer of port 2005 * 2006 * This function is a helper function that a low-level driver's 2007 * modify_qp method can use to validate the consumer's input. It 2008 * checks that cur_state and next_state are valid QP states, that a 2009 * transition from cur_state to next_state is allowed by the IB spec, 2010 * and that the attribute mask supplied is allowed for the transition. 
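 *
 * A low-level driver's modify_qp method might use it roughly like this
 * (sketch; error handling trimmed):
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
 *				rdma_port_get_link_layer(ibqp->device, port)))
 *		return -EINVAL;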
2011 */ 2012 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2013 enum ib_qp_type type, enum ib_qp_attr_mask mask, 2014 enum rdma_link_layer ll); 2015 2016 int ib_register_event_handler (struct ib_event_handler *event_handler); 2017 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 2018 void ib_dispatch_event(struct ib_event *event); 2019 2020 int ib_query_port(struct ib_device *device, 2021 u8 port_num, struct ib_port_attr *port_attr); 2022 2023 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2024 u8 port_num); 2025 2026 /** 2027 * rdma_cap_ib_switch - Check if the device is IB switch 2028 * @device: Device to check 2029 * 2030 * Device driver is responsible for setting is_switch bit on 2031 * in ib_device structure at init time. 2032 * 2033 * Return: true if the device is IB switch. 2034 */ 2035 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2036 { 2037 return device->is_switch; 2038 } 2039 2040 /** 2041 * rdma_start_port - Return the first valid port number for the device 2042 * specified 2043 * 2044 * @device: Device to be checked 2045 * 2046 * Return start port number 2047 */ 2048 static inline u8 rdma_start_port(const struct ib_device *device) 2049 { 2050 return rdma_cap_ib_switch(device) ? 0 : 1; 2051 } 2052 2053 /** 2054 * rdma_end_port - Return the last valid port number for the device 2055 * specified 2056 * 2057 * @device: Device to be checked 2058 * 2059 * Return last port number 2060 */ 2061 static inline u8 rdma_end_port(const struct ib_device *device) 2062 { 2063 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; 2064 } 2065 2066 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 2067 { 2068 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; 2069 } 2070 2071 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2072 { 2073 return device->port_immutable[port_num].core_cap_flags & 2074 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 2075 } 2076 2077 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 2078 { 2079 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 2080 } 2081 2082 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 2083 { 2084 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 2085 } 2086 2087 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 2088 { 2089 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; 2090 } 2091 2092 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2093 { 2094 return rdma_protocol_ib(device, port_num) || 2095 rdma_protocol_roce(device, port_num); 2096 } 2097 2098 /** 2099 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 2100 * Management Datagrams. 2101 * @device: Device to check 2102 * @port_num: Port number to check 2103 * 2104 * Management Datagrams (MAD) are a required part of the InfiniBand 2105 * specification and are supported on all InfiniBand devices. A slightly 2106 * extended version are also supported on OPA interfaces. 2107 * 2108 * Return: true if the port supports sending/receiving of MAD packets. 
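 *
 * For example (sketch), a MAD consumer would set up per-port resources only
 * on ports for which this returns true:
 *
 *	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port)
 *		if (rdma_cap_ib_mad(device, port))
 *			set up MAD agents for this port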
2109 */ 2110 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 2111 { 2112 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; 2113 } 2114 2115 /** 2116 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA 2117 * Management Datagrams. 2118 * @device: Device to check 2119 * @port_num: Port number to check 2120 * 2121 * Intel OmniPath devices extend and/or replace the InfiniBand Management 2122 * datagrams with their own versions. These OPA MADs share many but not all of 2123 * the characteristics of InfiniBand MADs. 2124 * 2125 * OPA MADs differ in the following ways: 2126 * 2127 * 1) MADs are variable size up to 2K 2128 * IBTA defined MADs remain fixed at 256 bytes 2129 * 2) OPA SMPs must carry valid PKeys 2130 * 3) OPA SMP packets use a different format 2131 * 2132 * Return: true if the port supports OPA MAD packet formats. 2133 */ 2134 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 2135 { 2136 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) 2137 == RDMA_CORE_CAP_OPA_MAD; 2138 } 2139 2140 /** 2141 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand 2142 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 2143 * @device: Device to check 2144 * @port_num: Port number to check 2145 * 2146 * Each InfiniBand node is required to provide a Subnet Management Agent 2147 * that the subnet manager can access. Prior to the fabric being fully 2148 * configured by the subnet manager, the SMA is accessed via a well known 2149 * interface called the Subnet Management Interface (SMI). This interface 2150 * uses directed route packets to communicate with the SM to get around the 2151 * chicken and egg problem of the SM needing to know what's on the fabric 2152 * in order to configure the fabric, and needing to configure the fabric in 2153 * order to send packets to the devices on the fabric. These directed 2154 * route packets do not need the fabric fully configured in order to reach 2155 * their destination. The SMI is the only method allowed to send 2156 * directed route packets on an InfiniBand fabric. 2157 * 2158 * Return: true if the port provides an SMI. 2159 */ 2160 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 2161 { 2162 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; 2163 } 2164 2165 /** 2166 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand 2167 * Communication Manager. 2168 * @device: Device to check 2169 * @port_num: Port number to check 2170 * 2171 * The InfiniBand Communication Manager is one of many pre-defined General 2172 * Service Agents (GSA) that are accessed via the General Service 2173 * Interface (GSI). Its role is to facilitate establishment of connections 2174 * between nodes as well as other management related tasks for established 2175 * connections. 2176 * 2177 * Return: true if the port supports an IB CM (this does not guarantee that 2178 * a CM is actually running however). 2179 */ 2180 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 2181 { 2182 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; 2183 } 2184 2185 /** 2186 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP 2187 * Communication Manager.
2188 * @device: Device to check 2189 * @port_num: Port number to check 2190 * 2191 * Similar to above, but specific to iWARP connections which have a different 2192 * management protocol than InfiniBand. 2193 * 2194 * Return: true if the port supports an iWARP CM (this does not guarantee that 2195 * a CM is actually running however). 2196 */ 2197 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 2198 { 2199 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; 2200 } 2201 2202 /** 2203 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand 2204 * Subnet Administration. 2205 * @device: Device to check 2206 * @port_num: Port number to check 2207 * 2208 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 2209 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 2210 * fabrics, devices should resolve routes to other hosts by contacting the 2211 * SA to query the proper route. 2212 * 2213 * Return: true if the port should act as a client to the fabric Subnet 2214 * Administration interface. This does not imply that the SA service is 2215 * running locally. 2216 */ 2217 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 2218 { 2219 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; 2220 } 2221 2222 /** 2223 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand 2224 * Multicast. 2225 * @device: Device to check 2226 * @port_num: Port number to check 2227 * 2228 * InfiniBand multicast registration is more complex than normal IPv4 or 2229 * IPv6 multicast registration. Each Host Channel Adapter must register 2230 * with the Subnet Manager when it wishes to join a multicast group. It 2231 * should do so only once regardless of how many queue pairs it subscribes 2232 * to this group. And it should leave the group only after all queue pairs 2233 * attached to the group have been detached. 2234 * 2235 * Return: true if the port must undertake the additional administrative 2236 * overhead of registering/unregistering with the SM and tracking the 2237 * total number of queue pairs attached to the multicast group. 2238 */ 2239 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 2240 { 2241 return rdma_cap_ib_sa(device, port_num); 2242 } 2243 2244 /** 2245 * rdma_cap_af_ib - Check if the port of a device supports Native 2246 * InfiniBand Addressing. 2247 * @device: Device to check 2248 * @port_num: Port number to check 2249 * 2250 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 2251 * GID. RoCE uses a different mechanism, but still generates a GID via 2252 * a prescribed mechanism and port specific data. 2253 * 2254 * Return: true if the port uses a GID address to identify devices on the 2255 * network. 2256 */ 2257 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 2258 { 2259 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; 2260 } 2261 2262 /** 2263 * rdma_cap_eth_ah - Check if the port of a device supports Ethernet 2264 * Address Handles. 2265 * @device: Device to check 2266 * @port_num: Port number to check 2267 * 2268 * RoCE is InfiniBand over Ethernet, and it uses a well-defined technique 2269 * to fabricate GIDs over Ethernet/IP specific addresses native to the 2270 * port.
Normally, packet headers are generated by the sending host 2271 * adapter, but when sending connectionless datagrams, we must manually 2272 * inject the proper headers for the fabric we are communicating over. 2273 * 2274 * Return: true if we are running as a RoCE port and must force the 2275 * addition of a Global Route Header built from our Ethernet Address 2276 * Handle into our header list for connectionless packets. 2277 */ 2278 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2279 { 2280 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2281 } 2282 2283 /** 2284 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2285 * 2286 * @device: Device 2287 * @port_num: Port number 2288 * 2289 * This MAD size includes the MAD headers and MAD payload. No other headers 2290 * are included. 2291 * 2292 * Return the max MAD size required by the Port. Will return 0 if the port 2293 * does not support MADs 2294 */ 2295 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 2296 { 2297 return device->port_immutable[port_num].max_mad_size; 2298 } 2299 2300 /** 2301 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 2302 * @device: Device to check 2303 * @port_num: Port number to check 2304 * 2305 * RoCE GID table mechanism manages the various GIDs for a device. 2306 * 2307 * NOTE: if allocating the port's GID table has failed, this call will still 2308 * return true, but any RoCE GID table API will fail. 2309 * 2310 * Return: true if the port uses RoCE GID table mechanism in order to manage 2311 * its GIDs. 2312 */ 2313 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 2314 u8 port_num) 2315 { 2316 return rdma_protocol_roce(device, port_num) && 2317 device->add_gid && device->del_gid; 2318 } 2319 2320 int ib_query_gid(struct ib_device *device, 2321 u8 port_num, int index, union ib_gid *gid, 2322 struct ib_gid_attr *attr); 2323 2324 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2325 int state); 2326 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2327 struct ifla_vf_info *info); 2328 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2329 struct ifla_vf_stats *stats); 2330 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2331 int type); 2332 2333 int ib_query_pkey(struct ib_device *device, 2334 u8 port_num, u16 index, u16 *pkey); 2335 2336 int ib_modify_device(struct ib_device *device, 2337 int device_modify_mask, 2338 struct ib_device_modify *device_modify); 2339 2340 int ib_modify_port(struct ib_device *device, 2341 u8 port_num, int port_modify_mask, 2342 struct ib_port_modify *port_modify); 2343 2344 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2345 enum ib_gid_type gid_type, struct net_device *ndev, 2346 u8 *port_num, u16 *index); 2347 2348 int ib_find_pkey(struct ib_device *device, 2349 u8 port_num, u16 pkey, u16 *index); 2350 2351 struct ib_pd *ib_alloc_pd(struct ib_device *device); 2352 2353 void ib_dealloc_pd(struct ib_pd *pd); 2354 2355 /** 2356 * ib_create_ah - Creates an address handle for the given address vector. 2357 * @pd: The protection domain associated with the address handle. 2358 * @ah_attr: The attributes of the address vector. 2359 * 2360 * The address handle is used to reference a local or global destination 2361 * in all UD QP post sends. 
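 *
 * Example (sketch only; "remote_lid", "port_num" and "pd" are hypothetical
 * and error handling is trimmed): building an address handle for a unicast
 * LID-routed destination:
 *
 *      struct ib_ah_attr ah_attr = {
 *              .dlid     = remote_lid,
 *              .sl       = 0,
 *              .port_num = port_num,
 *      };
 *      struct ib_ah *ah = ib_create_ah(pd, &ah_attr);
 *
 *      if (IS_ERR(ah))
 *              return PTR_ERR(ah);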
2362 */ 2363 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 2364 2365 /** 2366 * ib_init_ah_from_wc - Initializes address handle attributes from a 2367 * work completion. 2368 * @device: Device on which the received message arrived. 2369 * @port_num: Port on which the received message arrived. 2370 * @wc: Work completion associated with the received message. 2371 * @grh: References the received global route header. This parameter is 2372 * ignored unless the work completion indicates that the GRH is valid. 2373 * @ah_attr: Returned attributes that can be used when creating an address 2374 * handle for replying to the message. 2375 */ 2376 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 2377 const struct ib_wc *wc, const struct ib_grh *grh, 2378 struct ib_ah_attr *ah_attr); 2379 2380 /** 2381 * ib_create_ah_from_wc - Creates an address handle associated with the 2382 * sender of the specified work completion. 2383 * @pd: The protection domain associated with the address handle. 2384 * @wc: Work completion information associated with a received message. 2385 * @grh: References the received global route header. This parameter is 2386 * ignored unless the work completion indicates that the GRH is valid. 2387 * @port_num: The outbound port number to associate with the address. 2388 * 2389 * The address handle is used to reference a local or global destination 2390 * in all UD QP post sends. 2391 */ 2392 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 2393 const struct ib_grh *grh, u8 port_num); 2394 2395 /** 2396 * ib_modify_ah - Modifies the address vector associated with an address 2397 * handle. 2398 * @ah: The address handle to modify. 2399 * @ah_attr: The new address vector attributes to associate with the 2400 * address handle. 2401 */ 2402 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2403 2404 /** 2405 * ib_query_ah - Queries the address vector associated with an address 2406 * handle. 2407 * @ah: The address handle to query. 2408 * @ah_attr: The address vector attributes associated with the address 2409 * handle. 2410 */ 2411 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2412 2413 /** 2414 * ib_destroy_ah - Destroys an address handle. 2415 * @ah: The address handle to destroy. 2416 */ 2417 int ib_destroy_ah(struct ib_ah *ah); 2418 2419 /** 2420 * ib_create_srq - Creates an SRQ associated with the specified protection 2421 * domain. 2422 * @pd: The protection domain associated with the SRQ. 2423 * @srq_init_attr: A list of initial attributes required to create the 2424 * SRQ. If SRQ creation succeeds, then the attributes are updated to 2425 * the actual capabilities of the created SRQ. 2426 * 2427 * srq_attr->max_wr and srq_attr->max_sge are read to determine the 2428 * requested size of the SRQ, and set to the actual values allocated 2429 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 2430 * will always be at least as large as the requested values. 2431 */ 2432 struct ib_srq *ib_create_srq(struct ib_pd *pd, 2433 struct ib_srq_init_attr *srq_init_attr); 2434 2435 /** 2436 * ib_modify_srq - Modifies the attributes for the specified SRQ. 2437 * @srq: The SRQ to modify. 2438 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 2439 * the current values of selected SRQ attributes are returned. 2440 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 2441 * are being modified.
2442 * 2443 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 2444 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 2445 * the number of receives queued drops below the limit. 2446 */ 2447 int ib_modify_srq(struct ib_srq *srq, 2448 struct ib_srq_attr *srq_attr, 2449 enum ib_srq_attr_mask srq_attr_mask); 2450 2451 /** 2452 * ib_query_srq - Returns the attribute list and current values for the 2453 * specified SRQ. 2454 * @srq: The SRQ to query. 2455 * @srq_attr: The attributes of the specified SRQ. 2456 */ 2457 int ib_query_srq(struct ib_srq *srq, 2458 struct ib_srq_attr *srq_attr); 2459 2460 /** 2461 * ib_destroy_srq - Destroys the specified SRQ. 2462 * @srq: The SRQ to destroy. 2463 */ 2464 int ib_destroy_srq(struct ib_srq *srq); 2465 2466 /** 2467 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 2468 * @srq: The SRQ to post the work request on. 2469 * @recv_wr: A list of work requests to post on the receive queue. 2470 * @bad_recv_wr: On an immediate failure, this parameter will reference 2471 * the work request that failed to be posted on the QP. 2472 */ 2473 static inline int ib_post_srq_recv(struct ib_srq *srq, 2474 struct ib_recv_wr *recv_wr, 2475 struct ib_recv_wr **bad_recv_wr) 2476 { 2477 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 2478 } 2479 2480 /** 2481 * ib_create_qp - Creates a QP associated with the specified protection 2482 * domain. 2483 * @pd: The protection domain associated with the QP. 2484 * @qp_init_attr: A list of initial attributes required to create the 2485 * QP. If QP creation succeeds, then the attributes are updated to 2486 * the actual capabilities of the created QP. 2487 */ 2488 struct ib_qp *ib_create_qp(struct ib_pd *pd, 2489 struct ib_qp_init_attr *qp_init_attr); 2490 2491 /** 2492 * ib_modify_qp - Modifies the attributes for the specified QP and then 2493 * transitions the QP to the given state. 2494 * @qp: The QP to modify. 2495 * @qp_attr: On input, specifies the QP attributes to modify. On output, 2496 * the current values of selected QP attributes are returned. 2497 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 2498 * are being modified. 2499 */ 2500 int ib_modify_qp(struct ib_qp *qp, 2501 struct ib_qp_attr *qp_attr, 2502 int qp_attr_mask); 2503 2504 /** 2505 * ib_query_qp - Returns the attribute list and current values for the 2506 * specified QP. 2507 * @qp: The QP to query. 2508 * @qp_attr: The attributes of the specified QP. 2509 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 2510 * @qp_init_attr: Additional attributes of the selected QP. 2511 * 2512 * The qp_attr_mask may be used to limit the query to gathering only the 2513 * selected attributes. 2514 */ 2515 int ib_query_qp(struct ib_qp *qp, 2516 struct ib_qp_attr *qp_attr, 2517 int qp_attr_mask, 2518 struct ib_qp_init_attr *qp_init_attr); 2519 2520 /** 2521 * ib_destroy_qp - Destroys the specified QP. 2522 * @qp: The QP to destroy. 2523 */ 2524 int ib_destroy_qp(struct ib_qp *qp); 2525 2526 /** 2527 * ib_open_qp - Obtain a reference to an existing sharable QP. 2528 * @xrcd - XRC domain 2529 * @qp_open_attr: Attributes identifying the QP to open. 2530 * 2531 * Returns a reference to a sharable QP. 2532 */ 2533 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 2534 struct ib_qp_open_attr *qp_open_attr); 2535 2536 /** 2537 * ib_close_qp - Release an external reference to a QP. 2538 * @qp: The QP handle to release 2539 * 2540 * The opened QP handle is released by the caller. 
The underlying 2541 * shared QP is not destroyed until all internal references are released. 2542 */ 2543 int ib_close_qp(struct ib_qp *qp); 2544 2545 /** 2546 * ib_post_send - Posts a list of work requests to the send queue of 2547 * the specified QP. 2548 * @qp: The QP to post the work request on. 2549 * @send_wr: A list of work requests to post on the send queue. 2550 * @bad_send_wr: On an immediate failure, this parameter will reference 2551 * the work request that failed to be posted on the QP. 2552 * 2553 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 2554 * error is returned, the QP state shall not be affected, 2555 * ib_post_send() will return an immediate error after queueing any 2556 * earlier work requests in the list. 2557 */ 2558 static inline int ib_post_send(struct ib_qp *qp, 2559 struct ib_send_wr *send_wr, 2560 struct ib_send_wr **bad_send_wr) 2561 { 2562 return qp->device->post_send(qp, send_wr, bad_send_wr); 2563 } 2564 2565 /** 2566 * ib_post_recv - Posts a list of work requests to the receive queue of 2567 * the specified QP. 2568 * @qp: The QP to post the work request on. 2569 * @recv_wr: A list of work requests to post on the receive queue. 2570 * @bad_recv_wr: On an immediate failure, this parameter will reference 2571 * the work request that failed to be posted on the QP. 2572 */ 2573 static inline int ib_post_recv(struct ib_qp *qp, 2574 struct ib_recv_wr *recv_wr, 2575 struct ib_recv_wr **bad_recv_wr) 2576 { 2577 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 2578 } 2579 2580 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 2581 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); 2582 void ib_free_cq(struct ib_cq *cq); 2583 int ib_process_cq_direct(struct ib_cq *cq, int budget); 2584 2585 /** 2586 * ib_create_cq - Creates a CQ on the specified device. 2587 * @device: The device on which to create the CQ. 2588 * @comp_handler: A user-specified callback that is invoked when a 2589 * completion event occurs on the CQ. 2590 * @event_handler: A user-specified callback that is invoked when an 2591 * asynchronous event not associated with a completion occurs on the CQ. 2592 * @cq_context: Context associated with the CQ returned to the user via 2593 * the associated completion and event handlers. 2594 * @cq_attr: The attributes the CQ should be created upon. 2595 * 2596 * Users can examine the cq structure to determine the actual CQ size. 2597 */ 2598 struct ib_cq *ib_create_cq(struct ib_device *device, 2599 ib_comp_handler comp_handler, 2600 void (*event_handler)(struct ib_event *, void *), 2601 void *cq_context, 2602 const struct ib_cq_init_attr *cq_attr); 2603 2604 /** 2605 * ib_resize_cq - Modifies the capacity of the CQ. 2606 * @cq: The CQ to resize. 2607 * @cqe: The minimum size of the CQ. 2608 * 2609 * Users can examine the cq structure to determine the actual CQ size. 2610 */ 2611 int ib_resize_cq(struct ib_cq *cq, int cqe); 2612 2613 /** 2614 * ib_modify_cq - Modifies moderation params of the CQ 2615 * @cq: The CQ to modify. 2616 * @cq_count: number of CQEs that will trigger an event 2617 * @cq_period: max period of time in usec before triggering an event 2618 * 2619 */ 2620 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 2621 2622 /** 2623 * ib_destroy_cq - Destroys the specified CQ. 2624 * @cq: The CQ to destroy. 
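 *
 * Usage note (general guidance, not a statement about any particular
 * provider): destroying a CQ that still has QPs attached will typically
 * fail, so consumers normally drain and destroy those QPs first, e.g.:
 *
 *      ib_drain_qp(qp);
 *      ib_destroy_qp(qp);
 *      ib_destroy_cq(cq);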
2625 */ 2626 int ib_destroy_cq(struct ib_cq *cq); 2627 2628 /** 2629 * ib_poll_cq - poll a CQ for completion(s) 2630 * @cq: the CQ being polled 2631 * @num_entries: maximum number of completions to return 2632 * @wc: array of at least @num_entries &struct ib_wc where completions 2633 * will be returned 2634 * 2635 * Poll a CQ for (possibly multiple) completions. If the return value 2636 * is < 0, an error occurred. If the return value is >= 0, it is the 2637 * number of completions returned. If the return value is 2638 * non-negative and < num_entries, then the CQ was emptied. 2639 */ 2640 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 2641 struct ib_wc *wc) 2642 { 2643 return cq->device->poll_cq(cq, num_entries, wc); 2644 } 2645 2646 /** 2647 * ib_peek_cq - Returns the number of unreaped completions currently 2648 * on the specified CQ. 2649 * @cq: The CQ to peek. 2650 * @wc_cnt: A minimum number of unreaped completions to check for. 2651 * 2652 * If the number of unreaped completions is greater than or equal to wc_cnt, 2653 * this function returns wc_cnt; otherwise, it returns the actual number of 2654 * unreaped completions. 2655 */ 2656 int ib_peek_cq(struct ib_cq *cq, int wc_cnt); 2657 2658 /** 2659 * ib_req_notify_cq - Request completion notification on a CQ. 2660 * @cq: The CQ to generate an event for. 2661 * @flags: 2662 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 2663 * to request an event on the next solicited event or next work 2664 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 2665 * may also be |ed in to request a hint about missed events, as 2666 * described below. 2667 * 2668 * Return Value: 2669 * < 0 means an error occurred while requesting notification 2670 * == 0 means notification was requested successfully, and if 2671 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 2672 * were missed and it is safe to wait for another event. In 2673 * this case it is guaranteed that any work completions added 2674 * to the CQ since the last CQ poll will trigger a completion 2675 * notification event. 2676 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 2677 * in. It means that the consumer must poll the CQ again to 2678 * make sure it is empty to avoid missing an event because of a 2679 * race between requesting notification and an entry being 2680 * added to the CQ. This return value means it is possible 2681 * (but not guaranteed) that a work completion has been added 2682 * to the CQ since the last poll without triggering a 2683 * completion notification event. 2684 */ 2685 static inline int ib_req_notify_cq(struct ib_cq *cq, 2686 enum ib_cq_notify_flags flags) 2687 { 2688 return cq->device->req_notify_cq(cq, flags); 2689 } 2690 2691 /** 2692 * ib_req_ncomp_notif - Request completion notification when there are 2693 * at least the specified number of unreaped completions on the CQ. 2694 * @cq: The CQ to generate an event for. 2695 * @wc_cnt: The number of unreaped completions that should be on the 2696 * CQ before an event is generated. 2697 */ 2698 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 2699 { 2700 return cq->device->req_ncomp_notif ? 2701 cq->device->req_ncomp_notif(cq, wc_cnt) : 2702 -ENOSYS; 2703 } 2704 2705 /** 2706 * ib_get_dma_mr - Returns a memory region for system memory that is 2707 * usable for DMA. 2708 * @pd: The protection domain associated with the memory region. 2709 * @mr_access_flags: Specifies the memory access rights.
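 *
 * Example (sketch only; "pd", "mr" and "sge" are hypothetical locals and
 * error handling is trimmed): a kernel ULP commonly requests a DMA MR with
 * local write access and uses the returned lkey in its receive SGEs:
 *
 *      mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *      if (IS_ERR(mr))
 *              return PTR_ERR(mr);
 *      sge.lkey = mr->lkey;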
2710 * 2711 * Note that the ib_dma_*() functions defined below must be used 2712 * to create/destroy addresses used with the Lkey or Rkey returned 2713 * by ib_get_dma_mr(). 2714 */ 2715 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 2716 2717 /** 2718 * ib_dma_mapping_error - check a DMA addr for error 2719 * @dev: The device for which the dma_addr was created 2720 * @dma_addr: The DMA address to check 2721 */ 2722 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 2723 { 2724 if (dev->dma_ops) 2725 return dev->dma_ops->mapping_error(dev, dma_addr); 2726 return dma_mapping_error(dev->dma_device, dma_addr); 2727 } 2728 2729 /** 2730 * ib_dma_map_single - Map a kernel virtual address to DMA address 2731 * @dev: The device for which the dma_addr is to be created 2732 * @cpu_addr: The kernel virtual address 2733 * @size: The size of the region in bytes 2734 * @direction: The direction of the DMA 2735 */ 2736 static inline u64 ib_dma_map_single(struct ib_device *dev, 2737 void *cpu_addr, size_t size, 2738 enum dma_data_direction direction) 2739 { 2740 if (dev->dma_ops) 2741 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 2742 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 2743 } 2744 2745 /** 2746 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 2747 * @dev: The device for which the DMA address was created 2748 * @addr: The DMA address 2749 * @size: The size of the region in bytes 2750 * @direction: The direction of the DMA 2751 */ 2752 static inline void ib_dma_unmap_single(struct ib_device *dev, 2753 u64 addr, size_t size, 2754 enum dma_data_direction direction) 2755 { 2756 if (dev->dma_ops) 2757 dev->dma_ops->unmap_single(dev, addr, size, direction); 2758 else 2759 dma_unmap_single(dev->dma_device, addr, size, direction); 2760 } 2761 2762 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 2763 void *cpu_addr, size_t size, 2764 enum dma_data_direction direction, 2765 struct dma_attrs *attrs) 2766 { 2767 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 2768 direction, attrs); 2769 } 2770 2771 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 2772 u64 addr, size_t size, 2773 enum dma_data_direction direction, 2774 struct dma_attrs *attrs) 2775 { 2776 return dma_unmap_single_attrs(dev->dma_device, addr, size, 2777 direction, attrs); 2778 } 2779 2780 /** 2781 * ib_dma_map_page - Map a physical page to DMA address 2782 * @dev: The device for which the dma_addr is to be created 2783 * @page: The page to be mapped 2784 * @offset: The offset within the page 2785 * @size: The size of the region in bytes 2786 * @direction: The direction of the DMA 2787 */ 2788 static inline u64 ib_dma_map_page(struct ib_device *dev, 2789 struct page *page, 2790 unsigned long offset, 2791 size_t size, 2792 enum dma_data_direction direction) 2793 { 2794 if (dev->dma_ops) 2795 return dev->dma_ops->map_page(dev, page, offset, size, direction); 2796 return dma_map_page(dev->dma_device, page, offset, size, direction); 2797 } 2798 2799 /** 2800 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 2801 * @dev: The device for which the DMA address was created 2802 * @addr: The DMA address 2803 * @size: The size of the region in bytes 2804 * @direction: The direction of the DMA 2805 */ 2806 static inline void ib_dma_unmap_page(struct ib_device *dev, 2807 u64 addr, size_t size, 2808 enum dma_data_direction direction) 2809 { 2810 if (dev->dma_ops) 2811 
dev->dma_ops->unmap_page(dev, addr, size, direction); 2812 else 2813 dma_unmap_page(dev->dma_device, addr, size, direction); 2814 } 2815 2816 /** 2817 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 2818 * @dev: The device for which the DMA addresses are to be created 2819 * @sg: The array of scatter/gather entries 2820 * @nents: The number of scatter/gather entries 2821 * @direction: The direction of the DMA 2822 */ 2823 static inline int ib_dma_map_sg(struct ib_device *dev, 2824 struct scatterlist *sg, int nents, 2825 enum dma_data_direction direction) 2826 { 2827 if (dev->dma_ops) 2828 return dev->dma_ops->map_sg(dev, sg, nents, direction); 2829 return dma_map_sg(dev->dma_device, sg, nents, direction); 2830 } 2831 2832 /** 2833 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 2834 * @dev: The device for which the DMA addresses were created 2835 * @sg: The array of scatter/gather entries 2836 * @nents: The number of scatter/gather entries 2837 * @direction: The direction of the DMA 2838 */ 2839 static inline void ib_dma_unmap_sg(struct ib_device *dev, 2840 struct scatterlist *sg, int nents, 2841 enum dma_data_direction direction) 2842 { 2843 if (dev->dma_ops) 2844 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 2845 else 2846 dma_unmap_sg(dev->dma_device, sg, nents, direction); 2847 } 2848 2849 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 2850 struct scatterlist *sg, int nents, 2851 enum dma_data_direction direction, 2852 struct dma_attrs *attrs) 2853 { 2854 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 2855 } 2856 2857 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 2858 struct scatterlist *sg, int nents, 2859 enum dma_data_direction direction, 2860 struct dma_attrs *attrs) 2861 { 2862 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 2863 } 2864 /** 2865 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 2866 * @dev: The device for which the DMA addresses were created 2867 * @sg: The scatter/gather entry 2868 * 2869 * Note: this function is obsolete. To do: change all occurrences of 2870 * ib_sg_dma_address() into sg_dma_address(). 2871 */ 2872 static inline u64 ib_sg_dma_address(struct ib_device *dev, 2873 struct scatterlist *sg) 2874 { 2875 return sg_dma_address(sg); 2876 } 2877 2878 /** 2879 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 2880 * @dev: The device for which the DMA addresses were created 2881 * @sg: The scatter/gather entry 2882 * 2883 * Note: this function is obsolete. To do: change all occurrences of 2884 * ib_sg_dma_len() into sg_dma_len(). 
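 *
 * Example (illustrative sketch; "s", "i", "nents", "sgl" and "sge" are
 * hypothetical locals): the usual pattern is to map a scatterlist with
 * ib_dma_map_sg() and then read each entry's bus address and length while
 * building SGEs; sg_dma_address()/sg_dma_len() are the preferred modern
 * spellings, as noted above:
 *
 *      nents = ib_dma_map_sg(dev, sgl, sg_count, DMA_FROM_DEVICE);
 *      if (!nents)
 *              return -ENOMEM;
 *      for_each_sg(sgl, s, nents, i) {
 *              sge[i].addr   = ib_sg_dma_address(dev, s);
 *              sge[i].length = ib_sg_dma_len(dev, s);
 *      }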
2885 */ 2886 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 2887 struct scatterlist *sg) 2888 { 2889 return sg_dma_len(sg); 2890 } 2891 2892 /** 2893 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 2894 * @dev: The device for which the DMA address was created 2895 * @addr: The DMA address 2896 * @size: The size of the region in bytes 2897 * @dir: The direction of the DMA 2898 */ 2899 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 2900 u64 addr, 2901 size_t size, 2902 enum dma_data_direction dir) 2903 { 2904 if (dev->dma_ops) 2905 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 2906 else 2907 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 2908 } 2909 2910 /** 2911 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 2912 * @dev: The device for which the DMA address was created 2913 * @addr: The DMA address 2914 * @size: The size of the region in bytes 2915 * @dir: The direction of the DMA 2916 */ 2917 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 2918 u64 addr, 2919 size_t size, 2920 enum dma_data_direction dir) 2921 { 2922 if (dev->dma_ops) 2923 dev->dma_ops->sync_single_for_device(dev, addr, size, dir); 2924 else 2925 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 2926 } 2927 2928 /** 2929 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 2930 * @dev: The device for which the DMA address is requested 2931 * @size: The size of the region to allocate in bytes 2932 * @dma_handle: A pointer for returning the DMA address of the region 2933 * @flag: memory allocator flags 2934 */ 2935 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 2936 size_t size, 2937 u64 *dma_handle, 2938 gfp_t flag) 2939 { 2940 if (dev->dma_ops) 2941 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); 2942 else { 2943 dma_addr_t handle; 2944 void *ret; 2945 2946 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); 2947 *dma_handle = handle; 2948 return ret; 2949 } 2950 } 2951 2952 /** 2953 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 2954 * @dev: The device for which the DMA addresses were allocated 2955 * @size: The size of the region 2956 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 2957 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 2958 */ 2959 static inline void ib_dma_free_coherent(struct ib_device *dev, 2960 size_t size, void *cpu_addr, 2961 u64 dma_handle) 2962 { 2963 if (dev->dma_ops) 2964 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 2965 else 2966 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 2967 } 2968 2969 /** 2970 * ib_dereg_mr - Deregisters a memory region and removes it from the 2971 * HCA translation table. 2972 * @mr: The memory region to deregister. 2973 * 2974 * This function can fail, if the memory region has memory windows bound to it. 2975 */ 2976 int ib_dereg_mr(struct ib_mr *mr); 2977 2978 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 2979 enum ib_mr_type mr_type, 2980 u32 max_num_sg); 2981 2982 /** 2983 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 2984 * R_Key and L_Key. 2985 * @mr - struct ib_mr pointer to be updated. 2986 * @newkey - new key to be used. 
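 *
 * Example (sketch only, assuming the MR is not currently registered with
 * the device): ULPs typically derive a fresh key from the previous one
 * before posting a new IB_WR_REG_MR work request; the value is implicitly
 * truncated to the low byte, which is the key portion:
 *
 *      ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));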
2987 */ 2988 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 2989 { 2990 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 2991 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 2992 } 2993 2994 /** 2995 * ib_inc_rkey - Increments the key portion of the given rkey. Can be used 2996 * for calculating a new rkey for type 2 memory windows. 2997 * @rkey - the rkey to increment. 2998 */ 2999 static inline u32 ib_inc_rkey(u32 rkey) 3000 { 3001 const u32 mask = 0x000000ff; 3002 return ((rkey + 1) & mask) | (rkey & ~mask); 3003 } 3004 3005 /** 3006 * ib_alloc_fmr - Allocates an unmapped fast memory region. 3007 * @pd: The protection domain associated with the unmapped region. 3008 * @mr_access_flags: Specifies the memory access rights. 3009 * @fmr_attr: Attributes of the unmapped region. 3010 * 3011 * A fast memory region must be mapped before it can be used as part of 3012 * a work request. 3013 */ 3014 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 3015 int mr_access_flags, 3016 struct ib_fmr_attr *fmr_attr); 3017 3018 /** 3019 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 3020 * @fmr: The fast memory region to associate with the pages. 3021 * @page_list: An array of physical pages to map to the fast memory region. 3022 * @list_len: The number of pages in page_list. 3023 * @iova: The I/O virtual address to use with the mapped region. 3024 */ 3025 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 3026 u64 *page_list, int list_len, 3027 u64 iova) 3028 { 3029 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3030 } 3031 3032 /** 3033 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 3034 * @fmr_list: A linked list of fast memory regions to unmap. 3035 */ 3036 int ib_unmap_fmr(struct list_head *fmr_list); 3037 3038 /** 3039 * ib_dealloc_fmr - Deallocates a fast memory region. 3040 * @fmr: The fast memory region to deallocate. 3041 */ 3042 int ib_dealloc_fmr(struct ib_fmr *fmr); 3043 3044 /** 3045 * ib_attach_mcast - Attaches the specified QP to a multicast group. 3046 * @qp: QP to attach to the multicast group. The QP must be type 3047 * IB_QPT_UD. 3048 * @gid: Multicast group GID. 3049 * @lid: Multicast group LID in host byte order. 3050 * 3051 * In order to send and receive multicast packets, subnet 3052 * administration must have created the multicast group and configured 3053 * the fabric appropriately. The port associated with the specified 3054 * QP must also be a member of the multicast group. 3055 */ 3056 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3057 3058 /** 3059 * ib_detach_mcast - Detaches the specified QP from a multicast group. 3060 * @qp: QP to detach from the multicast group. 3061 * @gid: Multicast group GID. 3062 * @lid: Multicast group LID in host byte order. 3063 */ 3064 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3065 3066 /** 3067 * ib_alloc_xrcd - Allocates an XRC domain. 3068 * @device: The device on which to allocate the XRC domain. 3069 */ 3070 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); 3071 3072 /** 3073 * ib_dealloc_xrcd - Deallocates an XRC domain. 3074 * @xrcd: The XRC domain to deallocate.
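 *
 * Lifecycle sketch (illustrative only; error handling trimmed): an XRC
 * domain is typically allocated once, shared by the XRC QPs and SRQs
 * created against it, and released when no users remain:
 *
 *      xrcd = ib_alloc_xrcd(device);
 *      if (IS_ERR(xrcd))
 *              return PTR_ERR(xrcd);
 *      ...
 *      ib_dealloc_xrcd(xrcd);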
3075 */ 3076 int ib_dealloc_xrcd(struct ib_xrcd *xrcd); 3077 3078 struct ib_flow *ib_create_flow(struct ib_qp *qp, 3079 struct ib_flow_attr *flow_attr, int domain); 3080 int ib_destroy_flow(struct ib_flow *flow_id); 3081 3082 static inline int ib_check_mr_access(int flags) 3083 { 3084 /* 3085 * Local write permission is required if remote write or 3086 * remote atomic permission is also requested. 3087 */ 3088 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 3089 !(flags & IB_ACCESS_LOCAL_WRITE)) 3090 return -EINVAL; 3091 3092 return 0; 3093 } 3094 3095 /** 3096 * ib_check_mr_status: lightweight check of MR status. 3097 * This routine may provide status checks on a selected 3098 * ib_mr. The first use is for signature status checks. 3099 * 3100 * @mr: A memory region. 3101 * @check_mask: Bitmask of which checks to perform from 3102 * ib_mr_status_check enumeration. 3103 * @mr_status: The container of relevant status checks. 3104 * Failed checks will be indicated in the status bitmask 3105 * and the relevant info shall be in the error item. 3106 */ 3107 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3108 struct ib_mr_status *mr_status); 3109 3110 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3111 u16 pkey, const union ib_gid *gid, 3112 const struct sockaddr *addr); 3113 3114 int ib_map_mr_sg(struct ib_mr *mr, 3115 struct scatterlist *sg, 3116 int sg_nents, 3117 unsigned int page_size); 3118 3119 static inline int 3120 ib_map_mr_sg_zbva(struct ib_mr *mr, 3121 struct scatterlist *sg, 3122 int sg_nents, 3123 unsigned int page_size) 3124 { 3125 int n; 3126 3127 n = ib_map_mr_sg(mr, sg, sg_nents, page_size); 3128 mr->iova = 0; 3129 3130 return n; 3131 } 3132 3133 int ib_sg_to_pages(struct ib_mr *mr, 3134 struct scatterlist *sgl, 3135 int sg_nents, 3136 int (*set_page)(struct ib_mr *, u64)); 3137 3138 void ib_drain_rq(struct ib_qp *qp); 3139 void ib_drain_sq(struct ib_qp *qp); 3140 void ib_drain_qp(struct ib_qp *qp); 3141 #endif /* IB_VERBS_H */ 3142