1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #if !defined(IB_VERBS_H) 40 #define IB_VERBS_H 41 42 #include <linux/types.h> 43 #include <linux/device.h> 44 #include <linux/mm.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/kref.h> 47 #include <linux/list.h> 48 #include <linux/rwsem.h> 49 #include <linux/scatterlist.h> 50 #include <linux/workqueue.h> 51 #include <uapi/linux/if_ether.h> 52 53 #include <linux/atomic.h> 54 #include <linux/mmu_notifier.h> 55 #include <asm/uaccess.h> 56 57 extern struct workqueue_struct *ib_wq; 58 59 union ib_gid { 60 u8 raw[16]; 61 struct { 62 __be64 subnet_prefix; 63 __be64 interface_id; 64 } global; 65 }; 66 67 enum rdma_node_type { 68 /* IB values map to NodeInfo:NodeType. 
*/ 69 RDMA_NODE_IB_CA = 1, 70 RDMA_NODE_IB_SWITCH, 71 RDMA_NODE_IB_ROUTER, 72 RDMA_NODE_RNIC, 73 RDMA_NODE_USNIC, 74 RDMA_NODE_USNIC_UDP, 75 }; 76 77 enum rdma_transport_type { 78 RDMA_TRANSPORT_IB, 79 RDMA_TRANSPORT_IWARP, 80 RDMA_TRANSPORT_USNIC, 81 RDMA_TRANSPORT_USNIC_UDP 82 }; 83 84 enum rdma_protocol_type { 85 RDMA_PROTOCOL_IB, 86 RDMA_PROTOCOL_IBOE, 87 RDMA_PROTOCOL_IWARP, 88 RDMA_PROTOCOL_USNIC_UDP 89 }; 90 91 __attribute_const__ enum rdma_transport_type 92 rdma_node_get_transport(enum rdma_node_type node_type); 93 94 enum rdma_link_layer { 95 IB_LINK_LAYER_UNSPECIFIED, 96 IB_LINK_LAYER_INFINIBAND, 97 IB_LINK_LAYER_ETHERNET, 98 }; 99 100 enum ib_device_cap_flags { 101 IB_DEVICE_RESIZE_MAX_WR = 1, 102 IB_DEVICE_BAD_PKEY_CNTR = (1<<1), 103 IB_DEVICE_BAD_QKEY_CNTR = (1<<2), 104 IB_DEVICE_RAW_MULTI = (1<<3), 105 IB_DEVICE_AUTO_PATH_MIG = (1<<4), 106 IB_DEVICE_CHANGE_PHY_PORT = (1<<5), 107 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6), 108 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7), 109 IB_DEVICE_SHUTDOWN_PORT = (1<<8), 110 IB_DEVICE_INIT_TYPE = (1<<9), 111 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10), 112 IB_DEVICE_SYS_IMAGE_GUID = (1<<11), 113 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12), 114 IB_DEVICE_SRQ_RESIZE = (1<<13), 115 IB_DEVICE_N_NOTIFY_CQ = (1<<14), 116 IB_DEVICE_LOCAL_DMA_LKEY = (1<<15), 117 IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */ 118 IB_DEVICE_MEM_WINDOW = (1<<17), 119 /* 120 * Devices should set IB_DEVICE_UD_IP_SUM if they support 121 * insertion of UDP and TCP checksum on outgoing UD IPoIB 122 * messages and can verify the validity of checksum for 123 * incoming messages. Setting this flag implies that the 124 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 125 */ 126 IB_DEVICE_UD_IP_CSUM = (1<<18), 127 IB_DEVICE_UD_TSO = (1<<19), 128 IB_DEVICE_XRC = (1<<20), 129 IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), 130 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), 131 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), 132 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), 133 IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29), 134 IB_DEVICE_SIGNATURE_HANDOVER = (1<<30), 135 IB_DEVICE_ON_DEMAND_PAGING = (1<<31), 136 }; 137 138 enum ib_signature_prot_cap { 139 IB_PROT_T10DIF_TYPE_1 = 1, 140 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 141 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 142 }; 143 144 enum ib_signature_guard_cap { 145 IB_GUARD_T10DIF_CRC = 1, 146 IB_GUARD_T10DIF_CSUM = 1 << 1, 147 }; 148 149 enum ib_atomic_cap { 150 IB_ATOMIC_NONE, 151 IB_ATOMIC_HCA, 152 IB_ATOMIC_GLOB 153 }; 154 155 enum ib_odp_general_cap_bits { 156 IB_ODP_SUPPORT = 1 << 0, 157 }; 158 159 enum ib_odp_transport_cap_bits { 160 IB_ODP_SUPPORT_SEND = 1 << 0, 161 IB_ODP_SUPPORT_RECV = 1 << 1, 162 IB_ODP_SUPPORT_WRITE = 1 << 2, 163 IB_ODP_SUPPORT_READ = 1 << 3, 164 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 165 }; 166 167 struct ib_odp_caps { 168 uint64_t general_caps; 169 struct { 170 uint32_t rc_odp_caps; 171 uint32_t uc_odp_caps; 172 uint32_t ud_odp_caps; 173 } per_transport_caps; 174 }; 175 176 enum ib_cq_creation_flags { 177 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 178 }; 179 180 struct ib_cq_init_attr { 181 unsigned int cqe; 182 int comp_vector; 183 u32 flags; 184 }; 185 186 struct ib_device_attr { 187 u64 fw_ver; 188 __be64 sys_image_guid; 189 u64 max_mr_size; 190 u64 page_size_cap; 191 u32 vendor_id; 192 u32 vendor_part_id; 193 u32 hw_ver; 194 int max_qp; 195 int max_qp_wr; 196 int device_cap_flags; 197 int max_sge; 198 int max_sge_rd; 199 int max_cq; 200 int max_cqe; 201 int max_mr; 202 int max_pd; 203 int max_qp_rd_atom; 204 int max_ee_rd_atom; 205 int max_res_rd_atom; 206 
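        /*
         * Illustrative sketch (not part of the original header): a consumer
         * normally obtains these attributes via ib_query_device() and tests
         * device_cap_flags before relying on an optional feature;
         * consumer_enable_fast_reg() below is a hypothetical helper.
         *
         *      struct ib_device_attr attr;
         *
         *      if (!ib_query_device(device, &attr) &&
         *          (attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
         *              consumer_enable_fast_reg();
         */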
int max_qp_init_rd_atom; 207 int max_ee_init_rd_atom; 208 enum ib_atomic_cap atomic_cap; 209 enum ib_atomic_cap masked_atomic_cap; 210 int max_ee; 211 int max_rdd; 212 int max_mw; 213 int max_raw_ipv6_qp; 214 int max_raw_ethy_qp; 215 int max_mcast_grp; 216 int max_mcast_qp_attach; 217 int max_total_mcast_qp_attach; 218 int max_ah; 219 int max_fmr; 220 int max_map_per_fmr; 221 int max_srq; 222 int max_srq_wr; 223 int max_srq_sge; 224 unsigned int max_fast_reg_page_list_len; 225 u16 max_pkeys; 226 u8 local_ca_ack_delay; 227 int sig_prot_cap; 228 int sig_guard_cap; 229 struct ib_odp_caps odp_caps; 230 uint64_t timestamp_mask; 231 uint64_t hca_core_clock; /* in KHZ */ 232 }; 233 234 enum ib_mtu { 235 IB_MTU_256 = 1, 236 IB_MTU_512 = 2, 237 IB_MTU_1024 = 3, 238 IB_MTU_2048 = 4, 239 IB_MTU_4096 = 5 240 }; 241 242 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 243 { 244 switch (mtu) { 245 case IB_MTU_256: return 256; 246 case IB_MTU_512: return 512; 247 case IB_MTU_1024: return 1024; 248 case IB_MTU_2048: return 2048; 249 case IB_MTU_4096: return 4096; 250 default: return -1; 251 } 252 } 253 254 enum ib_port_state { 255 IB_PORT_NOP = 0, 256 IB_PORT_DOWN = 1, 257 IB_PORT_INIT = 2, 258 IB_PORT_ARMED = 3, 259 IB_PORT_ACTIVE = 4, 260 IB_PORT_ACTIVE_DEFER = 5 261 }; 262 263 enum ib_port_cap_flags { 264 IB_PORT_SM = 1 << 1, 265 IB_PORT_NOTICE_SUP = 1 << 2, 266 IB_PORT_TRAP_SUP = 1 << 3, 267 IB_PORT_OPT_IPD_SUP = 1 << 4, 268 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 269 IB_PORT_SL_MAP_SUP = 1 << 6, 270 IB_PORT_MKEY_NVRAM = 1 << 7, 271 IB_PORT_PKEY_NVRAM = 1 << 8, 272 IB_PORT_LED_INFO_SUP = 1 << 9, 273 IB_PORT_SM_DISABLED = 1 << 10, 274 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, 275 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, 276 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, 277 IB_PORT_CM_SUP = 1 << 16, 278 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, 279 IB_PORT_REINIT_SUP = 1 << 18, 280 IB_PORT_DEVICE_MGMT_SUP = 1 << 19, 281 IB_PORT_VENDOR_CLASS_SUP = 1 << 20, 282 IB_PORT_DR_NOTICE_SUP = 1 << 21, 283 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 284 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 285 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 286 IB_PORT_CLIENT_REG_SUP = 1 << 25, 287 IB_PORT_IP_BASED_GIDS = 1 << 26 288 }; 289 290 enum ib_port_width { 291 IB_WIDTH_1X = 1, 292 IB_WIDTH_4X = 2, 293 IB_WIDTH_8X = 4, 294 IB_WIDTH_12X = 8 295 }; 296 297 static inline int ib_width_enum_to_int(enum ib_port_width width) 298 { 299 switch (width) { 300 case IB_WIDTH_1X: return 1; 301 case IB_WIDTH_4X: return 4; 302 case IB_WIDTH_8X: return 8; 303 case IB_WIDTH_12X: return 12; 304 default: return -1; 305 } 306 } 307 308 enum ib_port_speed { 309 IB_SPEED_SDR = 1, 310 IB_SPEED_DDR = 2, 311 IB_SPEED_QDR = 4, 312 IB_SPEED_FDR10 = 8, 313 IB_SPEED_FDR = 16, 314 IB_SPEED_EDR = 32 315 }; 316 317 struct ib_protocol_stats { 318 /* TBD... 
*/ 319 }; 320 321 struct iw_protocol_stats { 322 u64 ipInReceives; 323 u64 ipInHdrErrors; 324 u64 ipInTooBigErrors; 325 u64 ipInNoRoutes; 326 u64 ipInAddrErrors; 327 u64 ipInUnknownProtos; 328 u64 ipInTruncatedPkts; 329 u64 ipInDiscards; 330 u64 ipInDelivers; 331 u64 ipOutForwDatagrams; 332 u64 ipOutRequests; 333 u64 ipOutDiscards; 334 u64 ipOutNoRoutes; 335 u64 ipReasmTimeout; 336 u64 ipReasmReqds; 337 u64 ipReasmOKs; 338 u64 ipReasmFails; 339 u64 ipFragOKs; 340 u64 ipFragFails; 341 u64 ipFragCreates; 342 u64 ipInMcastPkts; 343 u64 ipOutMcastPkts; 344 u64 ipInBcastPkts; 345 u64 ipOutBcastPkts; 346 347 u64 tcpRtoAlgorithm; 348 u64 tcpRtoMin; 349 u64 tcpRtoMax; 350 u64 tcpMaxConn; 351 u64 tcpActiveOpens; 352 u64 tcpPassiveOpens; 353 u64 tcpAttemptFails; 354 u64 tcpEstabResets; 355 u64 tcpCurrEstab; 356 u64 tcpInSegs; 357 u64 tcpOutSegs; 358 u64 tcpRetransSegs; 359 u64 tcpInErrs; 360 u64 tcpOutRsts; 361 }; 362 363 union rdma_protocol_stats { 364 struct ib_protocol_stats ib; 365 struct iw_protocol_stats iw; 366 }; 367 368 /* Define bits for the various functionality this port needs to be supported by 369 * the core. 370 */ 371 /* Management 0x00000FFF */ 372 #define RDMA_CORE_CAP_IB_MAD 0x00000001 373 #define RDMA_CORE_CAP_IB_SMI 0x00000002 374 #define RDMA_CORE_CAP_IB_CM 0x00000004 375 #define RDMA_CORE_CAP_IW_CM 0x00000008 376 #define RDMA_CORE_CAP_IB_SA 0x00000010 377 #define RDMA_CORE_CAP_OPA_MAD 0x00000020 378 379 /* Address format 0x000FF000 */ 380 #define RDMA_CORE_CAP_AF_IB 0x00001000 381 #define RDMA_CORE_CAP_ETH_AH 0x00002000 382 383 /* Protocol 0xFFF00000 */ 384 #define RDMA_CORE_CAP_PROT_IB 0x00100000 385 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 386 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 387 388 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 389 | RDMA_CORE_CAP_IB_MAD \ 390 | RDMA_CORE_CAP_IB_SMI \ 391 | RDMA_CORE_CAP_IB_CM \ 392 | RDMA_CORE_CAP_IB_SA \ 393 | RDMA_CORE_CAP_AF_IB) 394 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 395 | RDMA_CORE_CAP_IB_MAD \ 396 | RDMA_CORE_CAP_IB_CM \ 397 | RDMA_CORE_CAP_AF_IB \ 398 | RDMA_CORE_CAP_ETH_AH) 399 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 400 | RDMA_CORE_CAP_IW_CM) 401 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 402 | RDMA_CORE_CAP_OPA_MAD) 403 404 struct ib_port_attr { 405 enum ib_port_state state; 406 enum ib_mtu max_mtu; 407 enum ib_mtu active_mtu; 408 int gid_tbl_len; 409 u32 port_cap_flags; 410 u32 max_msg_sz; 411 u32 bad_pkey_cntr; 412 u32 qkey_viol_cntr; 413 u16 pkey_tbl_len; 414 u16 lid; 415 u16 sm_lid; 416 u8 lmc; 417 u8 max_vl_num; 418 u8 sm_sl; 419 u8 subnet_timeout; 420 u8 init_type_reply; 421 u8 active_width; 422 u8 active_speed; 423 u8 phys_state; 424 }; 425 426 enum ib_device_modify_flags { 427 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 428 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 429 }; 430 431 struct ib_device_modify { 432 u64 sys_image_guid; 433 char node_desc[64]; 434 }; 435 436 enum ib_port_modify_flags { 437 IB_PORT_SHUTDOWN = 1, 438 IB_PORT_INIT_TYPE = (1<<2), 439 IB_PORT_RESET_QKEY_CNTR = (1<<3) 440 }; 441 442 struct ib_port_modify { 443 u32 set_port_cap_mask; 444 u32 clr_port_cap_mask; 445 u8 init_type; 446 }; 447 448 enum ib_event_type { 449 IB_EVENT_CQ_ERR, 450 IB_EVENT_QP_FATAL, 451 IB_EVENT_QP_REQ_ERR, 452 IB_EVENT_QP_ACCESS_ERR, 453 IB_EVENT_COMM_EST, 454 IB_EVENT_SQ_DRAINED, 455 IB_EVENT_PATH_MIG, 456 IB_EVENT_PATH_MIG_ERR, 457 IB_EVENT_DEVICE_FATAL, 458 IB_EVENT_PORT_ACTIVE, 459 IB_EVENT_PORT_ERR, 460 IB_EVENT_LID_CHANGE, 461 IB_EVENT_PKEY_CHANGE, 462 
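        /*
         * Illustrative sketch (not part of the original header): these events
         * are delivered to handlers registered with INIT_IB_EVENT_HANDLER()
         * and ib_register_event_handler(); my_event_handler and my_port_work
         * are hypothetical names.
         *
         *      static void my_event_handler(struct ib_event_handler *handler,
         *                                   struct ib_event *event)
         *      {
         *              if (event->event == IB_EVENT_PORT_ACTIVE ||
         *                  event->event == IB_EVENT_PORT_ERR)
         *                      queue_work(ib_wq, &my_port_work);
         *      }
         */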
IB_EVENT_SM_CHANGE, 463 IB_EVENT_SRQ_ERR, 464 IB_EVENT_SRQ_LIMIT_REACHED, 465 IB_EVENT_QP_LAST_WQE_REACHED, 466 IB_EVENT_CLIENT_REREGISTER, 467 IB_EVENT_GID_CHANGE, 468 }; 469 470 __attribute_const__ const char *ib_event_msg(enum ib_event_type event); 471 472 struct ib_event { 473 struct ib_device *device; 474 union { 475 struct ib_cq *cq; 476 struct ib_qp *qp; 477 struct ib_srq *srq; 478 u8 port_num; 479 } element; 480 enum ib_event_type event; 481 }; 482 483 struct ib_event_handler { 484 struct ib_device *device; 485 void (*handler)(struct ib_event_handler *, struct ib_event *); 486 struct list_head list; 487 }; 488 489 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 490 do { \ 491 (_ptr)->device = _device; \ 492 (_ptr)->handler = _handler; \ 493 INIT_LIST_HEAD(&(_ptr)->list); \ 494 } while (0) 495 496 struct ib_global_route { 497 union ib_gid dgid; 498 u32 flow_label; 499 u8 sgid_index; 500 u8 hop_limit; 501 u8 traffic_class; 502 }; 503 504 struct ib_grh { 505 __be32 version_tclass_flow; 506 __be16 paylen; 507 u8 next_hdr; 508 u8 hop_limit; 509 union ib_gid sgid; 510 union ib_gid dgid; 511 }; 512 513 enum { 514 IB_MULTICAST_QPN = 0xffffff 515 }; 516 517 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 518 519 enum ib_ah_flags { 520 IB_AH_GRH = 1 521 }; 522 523 enum ib_rate { 524 IB_RATE_PORT_CURRENT = 0, 525 IB_RATE_2_5_GBPS = 2, 526 IB_RATE_5_GBPS = 5, 527 IB_RATE_10_GBPS = 3, 528 IB_RATE_20_GBPS = 6, 529 IB_RATE_30_GBPS = 4, 530 IB_RATE_40_GBPS = 7, 531 IB_RATE_60_GBPS = 8, 532 IB_RATE_80_GBPS = 9, 533 IB_RATE_120_GBPS = 10, 534 IB_RATE_14_GBPS = 11, 535 IB_RATE_56_GBPS = 12, 536 IB_RATE_112_GBPS = 13, 537 IB_RATE_168_GBPS = 14, 538 IB_RATE_25_GBPS = 15, 539 IB_RATE_100_GBPS = 16, 540 IB_RATE_200_GBPS = 17, 541 IB_RATE_300_GBPS = 18 542 }; 543 544 /** 545 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 546 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 547 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 548 * @rate: rate to convert. 549 */ 550 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 551 552 /** 553 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 554 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 555 * @rate: rate to convert. 556 */ 557 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 558 559 enum ib_mr_create_flags { 560 IB_MR_SIGNATURE_EN = 1, 561 }; 562 563 /** 564 * ib_mr_init_attr - Memory region init attributes passed to routine 565 * ib_create_mr. 566 * @max_reg_descriptors: max number of registration descriptors that 567 * may be used with registration work requests. 568 * @flags: MR creation flags bit mask. 569 */ 570 struct ib_mr_init_attr { 571 int max_reg_descriptors; 572 u32 flags; 573 }; 574 575 /** 576 * Signature types 577 * IB_SIG_TYPE_NONE: Unprotected. 578 * IB_SIG_TYPE_T10_DIF: Type T10-DIF 579 */ 580 enum ib_signature_type { 581 IB_SIG_TYPE_NONE, 582 IB_SIG_TYPE_T10_DIF, 583 }; 584 585 /** 586 * Signature T10-DIF block-guard types 587 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. 588 * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 589 */ 590 enum ib_t10_dif_bg_type { 591 IB_T10DIF_CRC, 592 IB_T10DIF_CSUM 593 }; 594 595 /** 596 * struct ib_t10_dif_domain - Parameters specific for T10-DIF 597 * domain. 598 * @bg_type: T10-DIF block guard type (CRC|CSUM) 599 * @pi_interval: protection information interval. 600 * @bg: seed of guard computation. 601 * @app_tag: application tag of guard block 602 * @ref_tag: initial guard block reference tag. 
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
        enum ib_t10_dif_bg_type bg_type;
        u16 pi_interval;
        u16 bg;
        u16 app_tag;
        u32 ref_tag;
        bool ref_remap;
        bool app_escape;
        bool ref_escape;
        u16 apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
        enum ib_signature_type sig_type;
        union {
                struct ib_t10_dif_domain dif;
        } sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
        u8 check_mask;
        struct ib_sig_domain mem;
        struct ib_sig_domain wire;
};

enum ib_sig_err_type {
        IB_SIG_BAD_GUARD,
        IB_SIG_BAD_REFTAG,
        IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
        enum ib_sig_err_type err_type;
        u32 expected;
        u32 actual;
        u64 sig_err_offset;
        u32 key;
};

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32 fail_status;
        struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
        struct ib_global_route grh;
        u16 dlid;
        u8 sl;
        u8 src_path_bits;
        u8 static_rate;
        u8 ah_flags;
        u8 port_num;
        u8 dmac[ETH_ALEN];
        u16 vlan_id;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_FAST_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
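 *
 * For example (illustrative sketch, not part of the original header), a
 * completion consumer can distinguish receive completions like this, where
 * consumer_handle_recv() is a hypothetical helper:
 *
 *      if (wc->opcode & IB_WC_RECV)
 *              consumer_handle_recv(wc);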
740 */ 741 IB_WC_RECV = 1 << 7, 742 IB_WC_RECV_RDMA_WITH_IMM 743 }; 744 745 enum ib_wc_flags { 746 IB_WC_GRH = 1, 747 IB_WC_WITH_IMM = (1<<1), 748 IB_WC_WITH_INVALIDATE = (1<<2), 749 IB_WC_IP_CSUM_OK = (1<<3), 750 IB_WC_WITH_SMAC = (1<<4), 751 IB_WC_WITH_VLAN = (1<<5), 752 }; 753 754 struct ib_wc { 755 u64 wr_id; 756 enum ib_wc_status status; 757 enum ib_wc_opcode opcode; 758 u32 vendor_err; 759 u32 byte_len; 760 struct ib_qp *qp; 761 union { 762 __be32 imm_data; 763 u32 invalidate_rkey; 764 } ex; 765 u32 src_qp; 766 int wc_flags; 767 u16 pkey_index; 768 u16 slid; 769 u8 sl; 770 u8 dlid_path_bits; 771 u8 port_num; /* valid only for DR SMPs on switches */ 772 u8 smac[ETH_ALEN]; 773 u16 vlan_id; 774 }; 775 776 enum ib_cq_notify_flags { 777 IB_CQ_SOLICITED = 1 << 0, 778 IB_CQ_NEXT_COMP = 1 << 1, 779 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 780 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 781 }; 782 783 enum ib_srq_type { 784 IB_SRQT_BASIC, 785 IB_SRQT_XRC 786 }; 787 788 enum ib_srq_attr_mask { 789 IB_SRQ_MAX_WR = 1 << 0, 790 IB_SRQ_LIMIT = 1 << 1, 791 }; 792 793 struct ib_srq_attr { 794 u32 max_wr; 795 u32 max_sge; 796 u32 srq_limit; 797 }; 798 799 struct ib_srq_init_attr { 800 void (*event_handler)(struct ib_event *, void *); 801 void *srq_context; 802 struct ib_srq_attr attr; 803 enum ib_srq_type srq_type; 804 805 union { 806 struct { 807 struct ib_xrcd *xrcd; 808 struct ib_cq *cq; 809 } xrc; 810 } ext; 811 }; 812 813 struct ib_qp_cap { 814 u32 max_send_wr; 815 u32 max_recv_wr; 816 u32 max_send_sge; 817 u32 max_recv_sge; 818 u32 max_inline_data; 819 }; 820 821 enum ib_sig_type { 822 IB_SIGNAL_ALL_WR, 823 IB_SIGNAL_REQ_WR 824 }; 825 826 enum ib_qp_type { 827 /* 828 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 829 * here (and in that order) since the MAD layer uses them as 830 * indices into a 2-entry table. 831 */ 832 IB_QPT_SMI, 833 IB_QPT_GSI, 834 835 IB_QPT_RC, 836 IB_QPT_UC, 837 IB_QPT_UD, 838 IB_QPT_RAW_IPV6, 839 IB_QPT_RAW_ETHERTYPE, 840 IB_QPT_RAW_PACKET = 8, 841 IB_QPT_XRC_INI = 9, 842 IB_QPT_XRC_TGT, 843 IB_QPT_MAX, 844 /* Reserve a range for qp types internal to the low level driver. 845 * These qp types will not be visible at the IB core layer, so the 846 * IB_QPT_MAX usages should not be affected in the core layer 847 */ 848 IB_QPT_RESERVED1 = 0x1000, 849 IB_QPT_RESERVED2, 850 IB_QPT_RESERVED3, 851 IB_QPT_RESERVED4, 852 IB_QPT_RESERVED5, 853 IB_QPT_RESERVED6, 854 IB_QPT_RESERVED7, 855 IB_QPT_RESERVED8, 856 IB_QPT_RESERVED9, 857 IB_QPT_RESERVED10, 858 }; 859 860 enum ib_qp_create_flags { 861 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 862 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 863 IB_QP_CREATE_NETIF_QP = 1 << 5, 864 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 865 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, 866 /* reserve bits 26-31 for low level drivers' internal use */ 867 IB_QP_CREATE_RESERVED_START = 1 << 26, 868 IB_QP_CREATE_RESERVED_END = 1 << 31, 869 }; 870 871 872 /* 873 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 874 * callback to destroy the passed in QP. 
875 */ 876 877 struct ib_qp_init_attr { 878 void (*event_handler)(struct ib_event *, void *); 879 void *qp_context; 880 struct ib_cq *send_cq; 881 struct ib_cq *recv_cq; 882 struct ib_srq *srq; 883 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 884 struct ib_qp_cap cap; 885 enum ib_sig_type sq_sig_type; 886 enum ib_qp_type qp_type; 887 enum ib_qp_create_flags create_flags; 888 u8 port_num; /* special QP types only */ 889 }; 890 891 struct ib_qp_open_attr { 892 void (*event_handler)(struct ib_event *, void *); 893 void *qp_context; 894 u32 qp_num; 895 enum ib_qp_type qp_type; 896 }; 897 898 enum ib_rnr_timeout { 899 IB_RNR_TIMER_655_36 = 0, 900 IB_RNR_TIMER_000_01 = 1, 901 IB_RNR_TIMER_000_02 = 2, 902 IB_RNR_TIMER_000_03 = 3, 903 IB_RNR_TIMER_000_04 = 4, 904 IB_RNR_TIMER_000_06 = 5, 905 IB_RNR_TIMER_000_08 = 6, 906 IB_RNR_TIMER_000_12 = 7, 907 IB_RNR_TIMER_000_16 = 8, 908 IB_RNR_TIMER_000_24 = 9, 909 IB_RNR_TIMER_000_32 = 10, 910 IB_RNR_TIMER_000_48 = 11, 911 IB_RNR_TIMER_000_64 = 12, 912 IB_RNR_TIMER_000_96 = 13, 913 IB_RNR_TIMER_001_28 = 14, 914 IB_RNR_TIMER_001_92 = 15, 915 IB_RNR_TIMER_002_56 = 16, 916 IB_RNR_TIMER_003_84 = 17, 917 IB_RNR_TIMER_005_12 = 18, 918 IB_RNR_TIMER_007_68 = 19, 919 IB_RNR_TIMER_010_24 = 20, 920 IB_RNR_TIMER_015_36 = 21, 921 IB_RNR_TIMER_020_48 = 22, 922 IB_RNR_TIMER_030_72 = 23, 923 IB_RNR_TIMER_040_96 = 24, 924 IB_RNR_TIMER_061_44 = 25, 925 IB_RNR_TIMER_081_92 = 26, 926 IB_RNR_TIMER_122_88 = 27, 927 IB_RNR_TIMER_163_84 = 28, 928 IB_RNR_TIMER_245_76 = 29, 929 IB_RNR_TIMER_327_68 = 30, 930 IB_RNR_TIMER_491_52 = 31 931 }; 932 933 enum ib_qp_attr_mask { 934 IB_QP_STATE = 1, 935 IB_QP_CUR_STATE = (1<<1), 936 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 937 IB_QP_ACCESS_FLAGS = (1<<3), 938 IB_QP_PKEY_INDEX = (1<<4), 939 IB_QP_PORT = (1<<5), 940 IB_QP_QKEY = (1<<6), 941 IB_QP_AV = (1<<7), 942 IB_QP_PATH_MTU = (1<<8), 943 IB_QP_TIMEOUT = (1<<9), 944 IB_QP_RETRY_CNT = (1<<10), 945 IB_QP_RNR_RETRY = (1<<11), 946 IB_QP_RQ_PSN = (1<<12), 947 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 948 IB_QP_ALT_PATH = (1<<14), 949 IB_QP_MIN_RNR_TIMER = (1<<15), 950 IB_QP_SQ_PSN = (1<<16), 951 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 952 IB_QP_PATH_MIG_STATE = (1<<18), 953 IB_QP_CAP = (1<<19), 954 IB_QP_DEST_QPN = (1<<20), 955 IB_QP_SMAC = (1<<21), 956 IB_QP_ALT_SMAC = (1<<22), 957 IB_QP_VID = (1<<23), 958 IB_QP_ALT_VID = (1<<24), 959 }; 960 961 enum ib_qp_state { 962 IB_QPS_RESET, 963 IB_QPS_INIT, 964 IB_QPS_RTR, 965 IB_QPS_RTS, 966 IB_QPS_SQD, 967 IB_QPS_SQE, 968 IB_QPS_ERR 969 }; 970 971 enum ib_mig_state { 972 IB_MIG_MIGRATED, 973 IB_MIG_REARM, 974 IB_MIG_ARMED 975 }; 976 977 enum ib_mw_type { 978 IB_MW_TYPE_1 = 1, 979 IB_MW_TYPE_2 = 2 980 }; 981 982 struct ib_qp_attr { 983 enum ib_qp_state qp_state; 984 enum ib_qp_state cur_qp_state; 985 enum ib_mtu path_mtu; 986 enum ib_mig_state path_mig_state; 987 u32 qkey; 988 u32 rq_psn; 989 u32 sq_psn; 990 u32 dest_qp_num; 991 int qp_access_flags; 992 struct ib_qp_cap cap; 993 struct ib_ah_attr ah_attr; 994 struct ib_ah_attr alt_ah_attr; 995 u16 pkey_index; 996 u16 alt_pkey_index; 997 u8 en_sqd_async_notify; 998 u8 sq_draining; 999 u8 max_rd_atomic; 1000 u8 max_dest_rd_atomic; 1001 u8 min_rnr_timer; 1002 u8 port_num; 1003 u8 timeout; 1004 u8 retry_cnt; 1005 u8 rnr_retry; 1006 u8 alt_port_num; 1007 u8 alt_timeout; 1008 u8 smac[ETH_ALEN]; 1009 u8 alt_smac[ETH_ALEN]; 1010 u16 vlan_id; 1011 u16 alt_vlan_id; 1012 }; 1013 1014 enum ib_wr_opcode { 1015 IB_WR_RDMA_WRITE, 1016 IB_WR_RDMA_WRITE_WITH_IMM, 1017 IB_WR_SEND, 1018 IB_WR_SEND_WITH_IMM, 1019 IB_WR_RDMA_READ, 1020 
IB_WR_ATOMIC_CMP_AND_SWP, 1021 IB_WR_ATOMIC_FETCH_AND_ADD, 1022 IB_WR_LSO, 1023 IB_WR_SEND_WITH_INV, 1024 IB_WR_RDMA_READ_WITH_INV, 1025 IB_WR_LOCAL_INV, 1026 IB_WR_FAST_REG_MR, 1027 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1028 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1029 IB_WR_BIND_MW, 1030 IB_WR_REG_SIG_MR, 1031 /* reserve values for low level drivers' internal use. 1032 * These values will not be used at all in the ib core layer. 1033 */ 1034 IB_WR_RESERVED1 = 0xf0, 1035 IB_WR_RESERVED2, 1036 IB_WR_RESERVED3, 1037 IB_WR_RESERVED4, 1038 IB_WR_RESERVED5, 1039 IB_WR_RESERVED6, 1040 IB_WR_RESERVED7, 1041 IB_WR_RESERVED8, 1042 IB_WR_RESERVED9, 1043 IB_WR_RESERVED10, 1044 }; 1045 1046 enum ib_send_flags { 1047 IB_SEND_FENCE = 1, 1048 IB_SEND_SIGNALED = (1<<1), 1049 IB_SEND_SOLICITED = (1<<2), 1050 IB_SEND_INLINE = (1<<3), 1051 IB_SEND_IP_CSUM = (1<<4), 1052 1053 /* reserve bits 26-31 for low level drivers' internal use */ 1054 IB_SEND_RESERVED_START = (1 << 26), 1055 IB_SEND_RESERVED_END = (1 << 31), 1056 }; 1057 1058 struct ib_sge { 1059 u64 addr; 1060 u32 length; 1061 u32 lkey; 1062 }; 1063 1064 struct ib_fast_reg_page_list { 1065 struct ib_device *device; 1066 u64 *page_list; 1067 unsigned int max_page_list_len; 1068 }; 1069 1070 /** 1071 * struct ib_mw_bind_info - Parameters for a memory window bind operation. 1072 * @mr: A memory region to bind the memory window to. 1073 * @addr: The address where the memory window should begin. 1074 * @length: The length of the memory window, in bytes. 1075 * @mw_access_flags: Access flags from enum ib_access_flags for the window. 1076 * 1077 * This struct contains the shared parameters for type 1 and type 2 1078 * memory window bind operations. 1079 */ 1080 struct ib_mw_bind_info { 1081 struct ib_mr *mr; 1082 u64 addr; 1083 u64 length; 1084 int mw_access_flags; 1085 }; 1086 1087 struct ib_send_wr { 1088 struct ib_send_wr *next; 1089 u64 wr_id; 1090 struct ib_sge *sg_list; 1091 int num_sge; 1092 enum ib_wr_opcode opcode; 1093 int send_flags; 1094 union { 1095 __be32 imm_data; 1096 u32 invalidate_rkey; 1097 } ex; 1098 union { 1099 struct { 1100 u64 remote_addr; 1101 u32 rkey; 1102 } rdma; 1103 struct { 1104 u64 remote_addr; 1105 u64 compare_add; 1106 u64 swap; 1107 u64 compare_add_mask; 1108 u64 swap_mask; 1109 u32 rkey; 1110 } atomic; 1111 struct { 1112 struct ib_ah *ah; 1113 void *header; 1114 int hlen; 1115 int mss; 1116 u32 remote_qpn; 1117 u32 remote_qkey; 1118 u16 pkey_index; /* valid for GSI only */ 1119 u8 port_num; /* valid for DR SMPs on switch only */ 1120 } ud; 1121 struct { 1122 u64 iova_start; 1123 struct ib_fast_reg_page_list *page_list; 1124 unsigned int page_shift; 1125 unsigned int page_list_len; 1126 u32 length; 1127 int access_flags; 1128 u32 rkey; 1129 } fast_reg; 1130 struct { 1131 struct ib_mw *mw; 1132 /* The new rkey for the memory window. 
*/ 1133 u32 rkey; 1134 struct ib_mw_bind_info bind_info; 1135 } bind_mw; 1136 struct { 1137 struct ib_sig_attrs *sig_attrs; 1138 struct ib_mr *sig_mr; 1139 int access_flags; 1140 struct ib_sge *prot; 1141 } sig_handover; 1142 } wr; 1143 u32 xrc_remote_srq_num; /* XRC TGT QPs only */ 1144 }; 1145 1146 struct ib_recv_wr { 1147 struct ib_recv_wr *next; 1148 u64 wr_id; 1149 struct ib_sge *sg_list; 1150 int num_sge; 1151 }; 1152 1153 enum ib_access_flags { 1154 IB_ACCESS_LOCAL_WRITE = 1, 1155 IB_ACCESS_REMOTE_WRITE = (1<<1), 1156 IB_ACCESS_REMOTE_READ = (1<<2), 1157 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 1158 IB_ACCESS_MW_BIND = (1<<4), 1159 IB_ZERO_BASED = (1<<5), 1160 IB_ACCESS_ON_DEMAND = (1<<6), 1161 }; 1162 1163 struct ib_phys_buf { 1164 u64 addr; 1165 u64 size; 1166 }; 1167 1168 struct ib_mr_attr { 1169 struct ib_pd *pd; 1170 u64 device_virt_addr; 1171 u64 size; 1172 int mr_access_flags; 1173 u32 lkey; 1174 u32 rkey; 1175 }; 1176 1177 enum ib_mr_rereg_flags { 1178 IB_MR_REREG_TRANS = 1, 1179 IB_MR_REREG_PD = (1<<1), 1180 IB_MR_REREG_ACCESS = (1<<2), 1181 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1182 }; 1183 1184 /** 1185 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation. 1186 * @wr_id: Work request id. 1187 * @send_flags: Flags from ib_send_flags enum. 1188 * @bind_info: More parameters of the bind operation. 1189 */ 1190 struct ib_mw_bind { 1191 u64 wr_id; 1192 int send_flags; 1193 struct ib_mw_bind_info bind_info; 1194 }; 1195 1196 struct ib_fmr_attr { 1197 int max_pages; 1198 int max_maps; 1199 u8 page_shift; 1200 }; 1201 1202 struct ib_umem; 1203 1204 struct ib_ucontext { 1205 struct ib_device *device; 1206 struct list_head pd_list; 1207 struct list_head mr_list; 1208 struct list_head mw_list; 1209 struct list_head cq_list; 1210 struct list_head qp_list; 1211 struct list_head srq_list; 1212 struct list_head ah_list; 1213 struct list_head xrcd_list; 1214 struct list_head rule_list; 1215 int closing; 1216 1217 struct pid *tgid; 1218 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1219 struct rb_root umem_tree; 1220 /* 1221 * Protects .umem_rbroot and tree, as well as odp_mrs_count and 1222 * mmu notifiers registration. 1223 */ 1224 struct rw_semaphore umem_rwsem; 1225 void (*invalidate_range)(struct ib_umem *umem, 1226 unsigned long start, unsigned long end); 1227 1228 struct mmu_notifier mn; 1229 atomic_t notifier_count; 1230 /* A list of umems that don't have private mmu notifier counters yet. 
*/ 1231 struct list_head no_private_counters; 1232 int odp_mrs_count; 1233 #endif 1234 }; 1235 1236 struct ib_uobject { 1237 u64 user_handle; /* handle given to us by userspace */ 1238 struct ib_ucontext *context; /* associated user context */ 1239 void *object; /* containing object */ 1240 struct list_head list; /* link to context's list */ 1241 int id; /* index into kernel idr */ 1242 struct kref ref; 1243 struct rw_semaphore mutex; /* protects .live */ 1244 int live; 1245 }; 1246 1247 struct ib_udata { 1248 const void __user *inbuf; 1249 void __user *outbuf; 1250 size_t inlen; 1251 size_t outlen; 1252 }; 1253 1254 struct ib_pd { 1255 struct ib_device *device; 1256 struct ib_uobject *uobject; 1257 atomic_t usecnt; /* count all resources */ 1258 }; 1259 1260 struct ib_xrcd { 1261 struct ib_device *device; 1262 atomic_t usecnt; /* count all exposed resources */ 1263 struct inode *inode; 1264 1265 struct mutex tgt_qp_mutex; 1266 struct list_head tgt_qp_list; 1267 }; 1268 1269 struct ib_ah { 1270 struct ib_device *device; 1271 struct ib_pd *pd; 1272 struct ib_uobject *uobject; 1273 }; 1274 1275 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1276 1277 struct ib_cq { 1278 struct ib_device *device; 1279 struct ib_uobject *uobject; 1280 ib_comp_handler comp_handler; 1281 void (*event_handler)(struct ib_event *, void *); 1282 void *cq_context; 1283 int cqe; 1284 atomic_t usecnt; /* count number of work queues */ 1285 }; 1286 1287 struct ib_srq { 1288 struct ib_device *device; 1289 struct ib_pd *pd; 1290 struct ib_uobject *uobject; 1291 void (*event_handler)(struct ib_event *, void *); 1292 void *srq_context; 1293 enum ib_srq_type srq_type; 1294 atomic_t usecnt; 1295 1296 union { 1297 struct { 1298 struct ib_xrcd *xrcd; 1299 struct ib_cq *cq; 1300 u32 srq_num; 1301 } xrc; 1302 } ext; 1303 }; 1304 1305 struct ib_qp { 1306 struct ib_device *device; 1307 struct ib_pd *pd; 1308 struct ib_cq *send_cq; 1309 struct ib_cq *recv_cq; 1310 struct ib_srq *srq; 1311 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1312 struct list_head xrcd_list; 1313 /* count times opened, mcast attaches, flow attaches */ 1314 atomic_t usecnt; 1315 struct list_head open_list; 1316 struct ib_qp *real_qp; 1317 struct ib_uobject *uobject; 1318 void (*event_handler)(struct ib_event *, void *); 1319 void *qp_context; 1320 u32 qp_num; 1321 enum ib_qp_type qp_type; 1322 }; 1323 1324 struct ib_mr { 1325 struct ib_device *device; 1326 struct ib_pd *pd; 1327 struct ib_uobject *uobject; 1328 u32 lkey; 1329 u32 rkey; 1330 atomic_t usecnt; /* count number of MWs */ 1331 }; 1332 1333 struct ib_mw { 1334 struct ib_device *device; 1335 struct ib_pd *pd; 1336 struct ib_uobject *uobject; 1337 u32 rkey; 1338 enum ib_mw_type type; 1339 }; 1340 1341 struct ib_fmr { 1342 struct ib_device *device; 1343 struct ib_pd *pd; 1344 struct list_head list; 1345 u32 lkey; 1346 u32 rkey; 1347 }; 1348 1349 /* Supported steering options */ 1350 enum ib_flow_attr_type { 1351 /* steering according to rule specifications */ 1352 IB_FLOW_ATTR_NORMAL = 0x0, 1353 /* default unicast and multicast rule - 1354 * receive all Eth traffic which isn't steered to any QP 1355 */ 1356 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1357 /* default multicast rule - 1358 * receive all Eth multicast traffic which isn't steered to any QP 1359 */ 1360 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1361 /* sniffer rule - receive all port traffic */ 1362 IB_FLOW_ATTR_SNIFFER = 0x3 1363 }; 1364 1365 /* Supported steering header types */ 1366 enum ib_flow_spec_type { 1367 /* L2 headers*/ 1368 
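        /*
         * Illustrative sketch (not part of the original header): a consumer
         * builds a buffer holding an ib_flow_attr followed by the individual
         * specs, e.g. a single L2 spec matching one destination MAC (mac[]
         * is an assumed six-byte array):
         *
         *      struct {
         *              struct ib_flow_attr     attr;
         *              struct ib_flow_spec_eth eth;
         *      } rule = {
         *              .attr = {
         *                      .type         = IB_FLOW_ATTR_NORMAL,
         *                      .size         = sizeof(rule),
         *                      .num_of_specs = 1,
         *                      .port         = 1,
         *              },
         *              .eth = {
         *                      .type = IB_FLOW_SPEC_ETH,
         *                      .size = sizeof(struct ib_flow_spec_eth),
         *              },
         *      };
         *
         *      memcpy(rule.eth.val.dst_mac, mac, 6);
         *      memset(rule.eth.mask.dst_mac, 0xff, 6);
         */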
IB_FLOW_SPEC_ETH = 0x20, 1369 IB_FLOW_SPEC_IB = 0x22, 1370 /* L3 header*/ 1371 IB_FLOW_SPEC_IPV4 = 0x30, 1372 /* L4 headers*/ 1373 IB_FLOW_SPEC_TCP = 0x40, 1374 IB_FLOW_SPEC_UDP = 0x41 1375 }; 1376 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1377 #define IB_FLOW_SPEC_SUPPORT_LAYERS 4 1378 1379 /* Flow steering rule priority is set according to it's domain. 1380 * Lower domain value means higher priority. 1381 */ 1382 enum ib_flow_domain { 1383 IB_FLOW_DOMAIN_USER, 1384 IB_FLOW_DOMAIN_ETHTOOL, 1385 IB_FLOW_DOMAIN_RFS, 1386 IB_FLOW_DOMAIN_NIC, 1387 IB_FLOW_DOMAIN_NUM /* Must be last */ 1388 }; 1389 1390 struct ib_flow_eth_filter { 1391 u8 dst_mac[6]; 1392 u8 src_mac[6]; 1393 __be16 ether_type; 1394 __be16 vlan_tag; 1395 }; 1396 1397 struct ib_flow_spec_eth { 1398 enum ib_flow_spec_type type; 1399 u16 size; 1400 struct ib_flow_eth_filter val; 1401 struct ib_flow_eth_filter mask; 1402 }; 1403 1404 struct ib_flow_ib_filter { 1405 __be16 dlid; 1406 __u8 sl; 1407 }; 1408 1409 struct ib_flow_spec_ib { 1410 enum ib_flow_spec_type type; 1411 u16 size; 1412 struct ib_flow_ib_filter val; 1413 struct ib_flow_ib_filter mask; 1414 }; 1415 1416 struct ib_flow_ipv4_filter { 1417 __be32 src_ip; 1418 __be32 dst_ip; 1419 }; 1420 1421 struct ib_flow_spec_ipv4 { 1422 enum ib_flow_spec_type type; 1423 u16 size; 1424 struct ib_flow_ipv4_filter val; 1425 struct ib_flow_ipv4_filter mask; 1426 }; 1427 1428 struct ib_flow_tcp_udp_filter { 1429 __be16 dst_port; 1430 __be16 src_port; 1431 }; 1432 1433 struct ib_flow_spec_tcp_udp { 1434 enum ib_flow_spec_type type; 1435 u16 size; 1436 struct ib_flow_tcp_udp_filter val; 1437 struct ib_flow_tcp_udp_filter mask; 1438 }; 1439 1440 union ib_flow_spec { 1441 struct { 1442 enum ib_flow_spec_type type; 1443 u16 size; 1444 }; 1445 struct ib_flow_spec_eth eth; 1446 struct ib_flow_spec_ib ib; 1447 struct ib_flow_spec_ipv4 ipv4; 1448 struct ib_flow_spec_tcp_udp tcp_udp; 1449 }; 1450 1451 struct ib_flow_attr { 1452 enum ib_flow_attr_type type; 1453 u16 size; 1454 u16 priority; 1455 u32 flags; 1456 u8 num_of_specs; 1457 u8 port; 1458 /* Following are the optional layers according to user request 1459 * struct ib_flow_spec_xxx 1460 * struct ib_flow_spec_yyy 1461 */ 1462 }; 1463 1464 struct ib_flow { 1465 struct ib_qp *qp; 1466 struct ib_uobject *uobject; 1467 }; 1468 1469 struct ib_mad_hdr; 1470 struct ib_grh; 1471 1472 enum ib_process_mad_flags { 1473 IB_MAD_IGNORE_MKEY = 1, 1474 IB_MAD_IGNORE_BKEY = 2, 1475 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 1476 }; 1477 1478 enum ib_mad_result { 1479 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 1480 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 1481 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 1482 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 1483 }; 1484 1485 #define IB_DEVICE_NAME_MAX 64 1486 1487 struct ib_cache { 1488 rwlock_t lock; 1489 struct ib_event_handler event_handler; 1490 struct ib_pkey_cache **pkey_cache; 1491 struct ib_gid_cache **gid_cache; 1492 u8 *lmc_cache; 1493 }; 1494 1495 struct ib_dma_mapping_ops { 1496 int (*mapping_error)(struct ib_device *dev, 1497 u64 dma_addr); 1498 u64 (*map_single)(struct ib_device *dev, 1499 void *ptr, size_t size, 1500 enum dma_data_direction direction); 1501 void (*unmap_single)(struct ib_device *dev, 1502 u64 addr, size_t size, 1503 enum dma_data_direction direction); 1504 u64 (*map_page)(struct ib_device *dev, 1505 struct page *page, unsigned long offset, 1506 size_t size, 1507 enum 
dma_data_direction direction); 1508 void (*unmap_page)(struct ib_device *dev, 1509 u64 addr, size_t size, 1510 enum dma_data_direction direction); 1511 int (*map_sg)(struct ib_device *dev, 1512 struct scatterlist *sg, int nents, 1513 enum dma_data_direction direction); 1514 void (*unmap_sg)(struct ib_device *dev, 1515 struct scatterlist *sg, int nents, 1516 enum dma_data_direction direction); 1517 void (*sync_single_for_cpu)(struct ib_device *dev, 1518 u64 dma_handle, 1519 size_t size, 1520 enum dma_data_direction dir); 1521 void (*sync_single_for_device)(struct ib_device *dev, 1522 u64 dma_handle, 1523 size_t size, 1524 enum dma_data_direction dir); 1525 void *(*alloc_coherent)(struct ib_device *dev, 1526 size_t size, 1527 u64 *dma_handle, 1528 gfp_t flag); 1529 void (*free_coherent)(struct ib_device *dev, 1530 size_t size, void *cpu_addr, 1531 u64 dma_handle); 1532 }; 1533 1534 struct iw_cm_verbs; 1535 1536 struct ib_port_immutable { 1537 int pkey_tbl_len; 1538 int gid_tbl_len; 1539 u32 core_cap_flags; 1540 u32 max_mad_size; 1541 }; 1542 1543 struct ib_device { 1544 struct device *dma_device; 1545 1546 char name[IB_DEVICE_NAME_MAX]; 1547 1548 struct list_head event_handler_list; 1549 spinlock_t event_handler_lock; 1550 1551 spinlock_t client_data_lock; 1552 struct list_head core_list; 1553 struct list_head client_data_list; 1554 1555 struct ib_cache cache; 1556 /** 1557 * port_immutable is indexed by port number 1558 */ 1559 struct ib_port_immutable *port_immutable; 1560 1561 int num_comp_vectors; 1562 1563 struct iw_cm_verbs *iwcm; 1564 1565 int (*get_protocol_stats)(struct ib_device *device, 1566 union rdma_protocol_stats *stats); 1567 int (*query_device)(struct ib_device *device, 1568 struct ib_device_attr *device_attr, 1569 struct ib_udata *udata); 1570 int (*query_port)(struct ib_device *device, 1571 u8 port_num, 1572 struct ib_port_attr *port_attr); 1573 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 1574 u8 port_num); 1575 int (*query_gid)(struct ib_device *device, 1576 u8 port_num, int index, 1577 union ib_gid *gid); 1578 int (*query_pkey)(struct ib_device *device, 1579 u8 port_num, u16 index, u16 *pkey); 1580 int (*modify_device)(struct ib_device *device, 1581 int device_modify_mask, 1582 struct ib_device_modify *device_modify); 1583 int (*modify_port)(struct ib_device *device, 1584 u8 port_num, int port_modify_mask, 1585 struct ib_port_modify *port_modify); 1586 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 1587 struct ib_udata *udata); 1588 int (*dealloc_ucontext)(struct ib_ucontext *context); 1589 int (*mmap)(struct ib_ucontext *context, 1590 struct vm_area_struct *vma); 1591 struct ib_pd * (*alloc_pd)(struct ib_device *device, 1592 struct ib_ucontext *context, 1593 struct ib_udata *udata); 1594 int (*dealloc_pd)(struct ib_pd *pd); 1595 struct ib_ah * (*create_ah)(struct ib_pd *pd, 1596 struct ib_ah_attr *ah_attr); 1597 int (*modify_ah)(struct ib_ah *ah, 1598 struct ib_ah_attr *ah_attr); 1599 int (*query_ah)(struct ib_ah *ah, 1600 struct ib_ah_attr *ah_attr); 1601 int (*destroy_ah)(struct ib_ah *ah); 1602 struct ib_srq * (*create_srq)(struct ib_pd *pd, 1603 struct ib_srq_init_attr *srq_init_attr, 1604 struct ib_udata *udata); 1605 int (*modify_srq)(struct ib_srq *srq, 1606 struct ib_srq_attr *srq_attr, 1607 enum ib_srq_attr_mask srq_attr_mask, 1608 struct ib_udata *udata); 1609 int (*query_srq)(struct ib_srq *srq, 1610 struct ib_srq_attr *srq_attr); 1611 int (*destroy_srq)(struct ib_srq *srq); 1612 int (*post_srq_recv)(struct ib_srq *srq, 
1613 struct ib_recv_wr *recv_wr, 1614 struct ib_recv_wr **bad_recv_wr); 1615 struct ib_qp * (*create_qp)(struct ib_pd *pd, 1616 struct ib_qp_init_attr *qp_init_attr, 1617 struct ib_udata *udata); 1618 int (*modify_qp)(struct ib_qp *qp, 1619 struct ib_qp_attr *qp_attr, 1620 int qp_attr_mask, 1621 struct ib_udata *udata); 1622 int (*query_qp)(struct ib_qp *qp, 1623 struct ib_qp_attr *qp_attr, 1624 int qp_attr_mask, 1625 struct ib_qp_init_attr *qp_init_attr); 1626 int (*destroy_qp)(struct ib_qp *qp); 1627 int (*post_send)(struct ib_qp *qp, 1628 struct ib_send_wr *send_wr, 1629 struct ib_send_wr **bad_send_wr); 1630 int (*post_recv)(struct ib_qp *qp, 1631 struct ib_recv_wr *recv_wr, 1632 struct ib_recv_wr **bad_recv_wr); 1633 struct ib_cq * (*create_cq)(struct ib_device *device, 1634 const struct ib_cq_init_attr *attr, 1635 struct ib_ucontext *context, 1636 struct ib_udata *udata); 1637 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 1638 u16 cq_period); 1639 int (*destroy_cq)(struct ib_cq *cq); 1640 int (*resize_cq)(struct ib_cq *cq, int cqe, 1641 struct ib_udata *udata); 1642 int (*poll_cq)(struct ib_cq *cq, int num_entries, 1643 struct ib_wc *wc); 1644 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 1645 int (*req_notify_cq)(struct ib_cq *cq, 1646 enum ib_cq_notify_flags flags); 1647 int (*req_ncomp_notif)(struct ib_cq *cq, 1648 int wc_cnt); 1649 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 1650 int mr_access_flags); 1651 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd, 1652 struct ib_phys_buf *phys_buf_array, 1653 int num_phys_buf, 1654 int mr_access_flags, 1655 u64 *iova_start); 1656 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 1657 u64 start, u64 length, 1658 u64 virt_addr, 1659 int mr_access_flags, 1660 struct ib_udata *udata); 1661 int (*rereg_user_mr)(struct ib_mr *mr, 1662 int flags, 1663 u64 start, u64 length, 1664 u64 virt_addr, 1665 int mr_access_flags, 1666 struct ib_pd *pd, 1667 struct ib_udata *udata); 1668 int (*query_mr)(struct ib_mr *mr, 1669 struct ib_mr_attr *mr_attr); 1670 int (*dereg_mr)(struct ib_mr *mr); 1671 int (*destroy_mr)(struct ib_mr *mr); 1672 struct ib_mr * (*create_mr)(struct ib_pd *pd, 1673 struct ib_mr_init_attr *mr_init_attr); 1674 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd, 1675 int max_page_list_len); 1676 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device, 1677 int page_list_len); 1678 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list); 1679 int (*rereg_phys_mr)(struct ib_mr *mr, 1680 int mr_rereg_mask, 1681 struct ib_pd *pd, 1682 struct ib_phys_buf *phys_buf_array, 1683 int num_phys_buf, 1684 int mr_access_flags, 1685 u64 *iova_start); 1686 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 1687 enum ib_mw_type type); 1688 int (*bind_mw)(struct ib_qp *qp, 1689 struct ib_mw *mw, 1690 struct ib_mw_bind *mw_bind); 1691 int (*dealloc_mw)(struct ib_mw *mw); 1692 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 1693 int mr_access_flags, 1694 struct ib_fmr_attr *fmr_attr); 1695 int (*map_phys_fmr)(struct ib_fmr *fmr, 1696 u64 *page_list, int list_len, 1697 u64 iova); 1698 int (*unmap_fmr)(struct list_head *fmr_list); 1699 int (*dealloc_fmr)(struct ib_fmr *fmr); 1700 int (*attach_mcast)(struct ib_qp *qp, 1701 union ib_gid *gid, 1702 u16 lid); 1703 int (*detach_mcast)(struct ib_qp *qp, 1704 union ib_gid *gid, 1705 u16 lid); 1706 int (*process_mad)(struct ib_device *device, 1707 int process_mad_flags, 1708 u8 port_num, 1709 const struct ib_wc *in_wc, 1710 const struct ib_grh *in_grh, 1711 const struct 
ib_mad_hdr *in_mad, 1712 size_t in_mad_size, 1713 struct ib_mad_hdr *out_mad, 1714 size_t *out_mad_size, 1715 u16 *out_mad_pkey_index); 1716 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 1717 struct ib_ucontext *ucontext, 1718 struct ib_udata *udata); 1719 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 1720 struct ib_flow * (*create_flow)(struct ib_qp *qp, 1721 struct ib_flow_attr 1722 *flow_attr, 1723 int domain); 1724 int (*destroy_flow)(struct ib_flow *flow_id); 1725 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 1726 struct ib_mr_status *mr_status); 1727 1728 struct ib_dma_mapping_ops *dma_ops; 1729 1730 struct module *owner; 1731 struct device dev; 1732 struct kobject *ports_parent; 1733 struct list_head port_list; 1734 1735 enum { 1736 IB_DEV_UNINITIALIZED, 1737 IB_DEV_REGISTERED, 1738 IB_DEV_UNREGISTERED 1739 } reg_state; 1740 1741 int uverbs_abi_ver; 1742 u64 uverbs_cmd_mask; 1743 u64 uverbs_ex_cmd_mask; 1744 1745 char node_desc[64]; 1746 __be64 node_guid; 1747 u32 local_dma_lkey; 1748 u8 node_type; 1749 u8 phys_port_cnt; 1750 1751 /** 1752 * The following mandatory functions are used only at device 1753 * registration. Keep functions such as these at the end of this 1754 * structure to avoid cache line misses when accessing struct ib_device 1755 * in fast paths. 1756 */ 1757 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 1758 }; 1759 1760 struct ib_client { 1761 char *name; 1762 void (*add) (struct ib_device *); 1763 void (*remove)(struct ib_device *); 1764 1765 struct list_head list; 1766 }; 1767 1768 struct ib_device *ib_alloc_device(size_t size); 1769 void ib_dealloc_device(struct ib_device *device); 1770 1771 int ib_register_device(struct ib_device *device, 1772 int (*port_callback)(struct ib_device *, 1773 u8, struct kobject *)); 1774 void ib_unregister_device(struct ib_device *device); 1775 1776 int ib_register_client (struct ib_client *client); 1777 void ib_unregister_client(struct ib_client *client); 1778 1779 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 1780 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 1781 void *data); 1782 1783 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 1784 { 1785 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 1786 } 1787 1788 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 1789 { 1790 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 1791 } 1792 1793 /** 1794 * ib_modify_qp_is_ok - Check that the supplied attribute mask 1795 * contains all required attributes and no attributes not allowed for 1796 * the given QP state transition. 1797 * @cur_state: Current QP state 1798 * @next_state: Next QP state 1799 * @type: QP type 1800 * @mask: Mask of supplied QP attributes 1801 * @ll : link layer of port 1802 * 1803 * This function is a helper function that a low-level driver's 1804 * modify_qp method can use to validate the consumer's input. It 1805 * checks that cur_state and next_state are valid QP states, that a 1806 * transition from cur_state to next_state is allowed by the IB spec, 1807 * and that the attribute mask supplied is allowed for the transition. 
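 *
 * A minimal illustrative use from a hypothetical driver's modify_qp method
 * (sketch, not part of the original header):
 *
 *      if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *                              attr_mask, IB_LINK_LAYER_INFINIBAND))
 *              return -EINVAL;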
1808 */ 1809 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1810 enum ib_qp_type type, enum ib_qp_attr_mask mask, 1811 enum rdma_link_layer ll); 1812 1813 int ib_register_event_handler (struct ib_event_handler *event_handler); 1814 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 1815 void ib_dispatch_event(struct ib_event *event); 1816 1817 int ib_query_device(struct ib_device *device, 1818 struct ib_device_attr *device_attr); 1819 1820 int ib_query_port(struct ib_device *device, 1821 u8 port_num, struct ib_port_attr *port_attr); 1822 1823 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 1824 u8 port_num); 1825 1826 /** 1827 * rdma_start_port - Return the first valid port number for the device 1828 * specified 1829 * 1830 * @device: Device to be checked 1831 * 1832 * Return start port number 1833 */ 1834 static inline u8 rdma_start_port(const struct ib_device *device) 1835 { 1836 return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; 1837 } 1838 1839 /** 1840 * rdma_end_port - Return the last valid port number for the device 1841 * specified 1842 * 1843 * @device: Device to be checked 1844 * 1845 * Return last port number 1846 */ 1847 static inline u8 rdma_end_port(const struct ib_device *device) 1848 { 1849 return (device->node_type == RDMA_NODE_IB_SWITCH) ? 1850 0 : device->phys_port_cnt; 1851 } 1852 1853 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 1854 { 1855 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; 1856 } 1857 1858 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 1859 { 1860 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 1861 } 1862 1863 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 1864 { 1865 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; 1866 } 1867 1868 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 1869 { 1870 return device->port_immutable[port_num].core_cap_flags & 1871 (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE); 1872 } 1873 1874 /** 1875 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 1876 * Management Datagrams. 1877 * @device: Device to check 1878 * @port_num: Port number to check 1879 * 1880 * Management Datagrams (MAD) are a required part of the InfiniBand 1881 * specification and are supported on all InfiniBand devices. A slightly 1882 * extended version are also supported on OPA interfaces. 1883 * 1884 * Return: true if the port supports sending/receiving of MAD packets. 1885 */ 1886 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 1887 { 1888 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; 1889 } 1890 1891 /** 1892 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 1893 * Management Datagrams. 1894 * @device: Device to check 1895 * @port_num: Port number to check 1896 * 1897 * Intel OmniPath devices extend and/or replace the InfiniBand Management 1898 * datagrams with their own versions. These OPA MADs share many but not all of 1899 * the characteristics of InfiniBand MADs. 
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
        return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
                == RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.  These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination.  The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of a device has the capability InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of a device has the capability iWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of a device has the capability InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of a device has the capability
 * InfiniBand Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
        return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of a device has the capability
 * Native InfiniBand Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of a device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
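 *
 * For example (illustrative sketch, not part of the original header),
 * address-handle setup code can use this to decide whether a GRH must be
 * forced on for a RoCE port:
 *
 *      if (rdma_cap_eth_ah(device, port_num))
 *              ah_attr->ah_flags |= IB_AH_GRH;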
2053 */ 2054 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2055 { 2056 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2057 } 2058 2059 /** 2060 * rdma_cap_read_multi_sge - Check if the port of device has the capability 2061 * RDMA Read Multiple Scatter-Gather Entries. 2062 * @device: Device to check 2063 * @port_num: Port number to check 2064 * 2065 * iWARP has a restriction that RDMA READ requests may only have a single 2066 * Scatter/Gather Entry (SGE) in the work request. 2067 * 2068 * NOTE: although the linux kernel currently assumes all devices are either 2069 * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and 2070 * WRITEs, according to Tom Talpey, this is not accurate. There are some 2071 * devices out there that support more than a single SGE on RDMA READ 2072 * requests, but do not support the same number of SGEs as they do on 2073 * RDMA WRITE requests. The linux kernel would need rearchitecting to 2074 * support these imbalanced READ/WRITE SGEs allowed devices. So, for now, 2075 * suffice with either the device supports the same READ/WRITE SGEs, or 2076 * it only gets one READ sge. 2077 * 2078 * Return: true for any device that allows more than one SGE in RDMA READ 2079 * requests. 2080 */ 2081 static inline bool rdma_cap_read_multi_sge(struct ib_device *device, 2082 u8 port_num) 2083 { 2084 return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP); 2085 } 2086 2087 /** 2088 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2089 * 2090 * @device: Device 2091 * @port_num: Port number 2092 * 2093 * This MAD size includes the MAD headers and MAD payload. No other headers 2094 * are included. 2095 * 2096 * Return the max MAD size required by the Port. Will return 0 if the port 2097 * does not support MADs 2098 */ 2099 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 2100 { 2101 return device->port_immutable[port_num].max_mad_size; 2102 } 2103 2104 int ib_query_gid(struct ib_device *device, 2105 u8 port_num, int index, union ib_gid *gid); 2106 2107 int ib_query_pkey(struct ib_device *device, 2108 u8 port_num, u16 index, u16 *pkey); 2109 2110 int ib_modify_device(struct ib_device *device, 2111 int device_modify_mask, 2112 struct ib_device_modify *device_modify); 2113 2114 int ib_modify_port(struct ib_device *device, 2115 u8 port_num, int port_modify_mask, 2116 struct ib_port_modify *port_modify); 2117 2118 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2119 u8 *port_num, u16 *index); 2120 2121 int ib_find_pkey(struct ib_device *device, 2122 u8 port_num, u16 pkey, u16 *index); 2123 2124 /** 2125 * ib_alloc_pd - Allocates an unused protection domain. 2126 * @device: The device on which to allocate the protection domain. 2127 * 2128 * A protection domain object provides an association between QPs, shared 2129 * receive queues, address handles, memory regions, and memory windows. 2130 */ 2131 struct ib_pd *ib_alloc_pd(struct ib_device *device); 2132 2133 /** 2134 * ib_dealloc_pd - Deallocates a protection domain. 2135 * @pd: The protection domain to deallocate. 2136 */ 2137 int ib_dealloc_pd(struct ib_pd *pd); 2138 2139 /** 2140 * ib_create_ah - Creates an address handle for the given address vector. 2141 * @pd: The protection domain associated with the address handle. 2142 * @ah_attr: The attributes of the address vector. 
2143 *
2144 * The address handle is used to reference a local or global destination
2145 * in all UD QP post sends.
2146 */
2147 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2148
2149 /**
2150 * ib_init_ah_from_wc - Initializes address handle attributes from a
2151 * work completion.
2152 * @device: Device on which the received message arrived.
2153 * @port_num: Port on which the received message arrived.
2154 * @wc: Work completion associated with the received message.
2155 * @grh: References the received global route header. This parameter is
2156 * ignored unless the work completion indicates that the GRH is valid.
2157 * @ah_attr: Returned attributes that can be used when creating an address
2158 * handle for replying to the message.
2159 */
2160 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2161 const struct ib_wc *wc, const struct ib_grh *grh,
2162 struct ib_ah_attr *ah_attr);
2163
2164 /**
2165 * ib_create_ah_from_wc - Creates an address handle associated with the
2166 * sender of the specified work completion.
2167 * @pd: The protection domain associated with the address handle.
2168 * @wc: Work completion information associated with a received message.
2169 * @grh: References the received global route header. This parameter is
2170 * ignored unless the work completion indicates that the GRH is valid.
2171 * @port_num: The outbound port number to associate with the address.
2172 *
2173 * The address handle is used to reference a local or global destination
2174 * in all UD QP post sends.
2175 */
2176 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2177 const struct ib_grh *grh, u8 port_num);
2178
2179 /**
2180 * ib_modify_ah - Modifies the address vector associated with an address
2181 * handle.
2182 * @ah: The address handle to modify.
2183 * @ah_attr: The new address vector attributes to associate with the
2184 * address handle.
2185 */
2186 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2187
2188 /**
2189 * ib_query_ah - Queries the address vector associated with an address
2190 * handle.
2191 * @ah: The address handle to query.
2192 * @ah_attr: The address vector attributes associated with the address
2193 * handle.
2194 */
2195 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2196
2197 /**
2198 * ib_destroy_ah - Destroys an address handle.
2199 * @ah: The address handle to destroy.
2200 */
2201 int ib_destroy_ah(struct ib_ah *ah);
2202
2203 /**
2204 * ib_create_srq - Creates an SRQ associated with the specified protection
2205 * domain.
2206 * @pd: The protection domain associated with the SRQ.
2207 * @srq_init_attr: A list of initial attributes required to create the
2208 * SRQ. If SRQ creation succeeds, then the attributes are updated to
2209 * the actual capabilities of the created SRQ.
2210 *
2211 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2212 * requested size of the SRQ, and set to the actual values allocated
2213 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
2214 * will always be at least as large as the requested values.
2215 */
2216 struct ib_srq *ib_create_srq(struct ib_pd *pd,
2217 struct ib_srq_init_attr *srq_init_attr);
2218
2219 /**
2220 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2221 * @srq: The SRQ to modify.
2222 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
2223 * the current values of selected SRQ attributes are returned.
2224 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 2225 * are being modified. 2226 * 2227 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 2228 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 2229 * the number of receives queued drops below the limit. 2230 */ 2231 int ib_modify_srq(struct ib_srq *srq, 2232 struct ib_srq_attr *srq_attr, 2233 enum ib_srq_attr_mask srq_attr_mask); 2234 2235 /** 2236 * ib_query_srq - Returns the attribute list and current values for the 2237 * specified SRQ. 2238 * @srq: The SRQ to query. 2239 * @srq_attr: The attributes of the specified SRQ. 2240 */ 2241 int ib_query_srq(struct ib_srq *srq, 2242 struct ib_srq_attr *srq_attr); 2243 2244 /** 2245 * ib_destroy_srq - Destroys the specified SRQ. 2246 * @srq: The SRQ to destroy. 2247 */ 2248 int ib_destroy_srq(struct ib_srq *srq); 2249 2250 /** 2251 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 2252 * @srq: The SRQ to post the work request on. 2253 * @recv_wr: A list of work requests to post on the receive queue. 2254 * @bad_recv_wr: On an immediate failure, this parameter will reference 2255 * the work request that failed to be posted on the QP. 2256 */ 2257 static inline int ib_post_srq_recv(struct ib_srq *srq, 2258 struct ib_recv_wr *recv_wr, 2259 struct ib_recv_wr **bad_recv_wr) 2260 { 2261 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 2262 } 2263 2264 /** 2265 * ib_create_qp - Creates a QP associated with the specified protection 2266 * domain. 2267 * @pd: The protection domain associated with the QP. 2268 * @qp_init_attr: A list of initial attributes required to create the 2269 * QP. If QP creation succeeds, then the attributes are updated to 2270 * the actual capabilities of the created QP. 2271 */ 2272 struct ib_qp *ib_create_qp(struct ib_pd *pd, 2273 struct ib_qp_init_attr *qp_init_attr); 2274 2275 /** 2276 * ib_modify_qp - Modifies the attributes for the specified QP and then 2277 * transitions the QP to the given state. 2278 * @qp: The QP to modify. 2279 * @qp_attr: On input, specifies the QP attributes to modify. On output, 2280 * the current values of selected QP attributes are returned. 2281 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 2282 * are being modified. 2283 */ 2284 int ib_modify_qp(struct ib_qp *qp, 2285 struct ib_qp_attr *qp_attr, 2286 int qp_attr_mask); 2287 2288 /** 2289 * ib_query_qp - Returns the attribute list and current values for the 2290 * specified QP. 2291 * @qp: The QP to query. 2292 * @qp_attr: The attributes of the specified QP. 2293 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 2294 * @qp_init_attr: Additional attributes of the selected QP. 2295 * 2296 * The qp_attr_mask may be used to limit the query to gathering only the 2297 * selected attributes. 2298 */ 2299 int ib_query_qp(struct ib_qp *qp, 2300 struct ib_qp_attr *qp_attr, 2301 int qp_attr_mask, 2302 struct ib_qp_init_attr *qp_init_attr); 2303 2304 /** 2305 * ib_destroy_qp - Destroys the specified QP. 2306 * @qp: The QP to destroy. 2307 */ 2308 int ib_destroy_qp(struct ib_qp *qp); 2309 2310 /** 2311 * ib_open_qp - Obtain a reference to an existing sharable QP. 2312 * @xrcd - XRC domain 2313 * @qp_open_attr: Attributes identifying the QP to open. 2314 * 2315 * Returns a reference to a sharable QP. 
2316 */
2317 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2318 struct ib_qp_open_attr *qp_open_attr);
2319
2320 /**
2321 * ib_close_qp - Release an external reference to a QP.
2322 * @qp: The QP handle to release
2323 *
2324 * The opened QP handle is released by the caller. The underlying
2325 * shared QP is not destroyed until all internal references are released.
2326 */
2327 int ib_close_qp(struct ib_qp *qp);
2328
2329 /**
2330 * ib_post_send - Posts a list of work requests to the send queue of
2331 * the specified QP.
2332 * @qp: The QP to post the work request on.
2333 * @send_wr: A list of work requests to post on the send queue.
2334 * @bad_send_wr: On an immediate failure, this parameter will reference
2335 * the work request that failed to be posted on the QP.
2336 *
2337 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
2338 * error is returned, the QP state shall not be affected,
2339 * ib_post_send() will return an immediate error after queueing any
2340 * earlier work requests in the list.
2341 */
2342 static inline int ib_post_send(struct ib_qp *qp,
2343 struct ib_send_wr *send_wr,
2344 struct ib_send_wr **bad_send_wr)
2345 {
2346 return qp->device->post_send(qp, send_wr, bad_send_wr);
2347 }
2348
2349 /**
2350 * ib_post_recv - Posts a list of work requests to the receive queue of
2351 * the specified QP.
2352 * @qp: The QP to post the work request on.
2353 * @recv_wr: A list of work requests to post on the receive queue.
2354 * @bad_recv_wr: On an immediate failure, this parameter will reference
2355 * the work request that failed to be posted on the QP.
2356 */
2357 static inline int ib_post_recv(struct ib_qp *qp,
2358 struct ib_recv_wr *recv_wr,
2359 struct ib_recv_wr **bad_recv_wr)
2360 {
2361 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2362 }
2363
2364 /**
2365 * ib_create_cq - Creates a CQ on the specified device.
2366 * @device: The device on which to create the CQ.
2367 * @comp_handler: A user-specified callback that is invoked when a
2368 * completion event occurs on the CQ.
2369 * @event_handler: A user-specified callback that is invoked when an
2370 * asynchronous event not associated with a completion occurs on the CQ.
2371 * @cq_context: Context associated with the CQ returned to the user via
2372 * the associated completion and event handlers.
2373 * @cq_attr: The attributes with which the CQ should be created.
2374 *
2375 * Users can examine the cq structure to determine the actual CQ size.
2376 */
2377 struct ib_cq *ib_create_cq(struct ib_device *device,
2378 ib_comp_handler comp_handler,
2379 void (*event_handler)(struct ib_event *, void *),
2380 void *cq_context,
2381 const struct ib_cq_init_attr *cq_attr);
2382
2383 /**
2384 * ib_resize_cq - Modifies the capacity of the CQ.
2385 * @cq: The CQ to resize.
2386 * @cqe: The minimum size of the CQ.
2387 *
2388 * Users can examine the cq structure to determine the actual CQ size.
2389 */
2390 int ib_resize_cq(struct ib_cq *cq, int cqe);
2391
2392 /**
2393 * ib_modify_cq - Modifies moderation parameters of the CQ.
2394 * @cq: The CQ to modify.
2395 * @cq_count: number of CQEs that will trigger an event
2396 * @cq_period: max period of time in usec before triggering an event
2397 *
2398 */
2399 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2400
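/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): posting a single signaled SEND work request that references one
 * registered buffer. The helper name is hypothetical; addr, length and lkey
 * are assumed to come from an earlier DMA mapping and memory registration.
 */
static inline int example_post_one_send(struct ib_qp *qp, u64 addr,
					u32 length, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr	= addr,
		.length	= length,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id		= wr_id,
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	/* On an immediate error, bad_wr points at the request that was not posted. */
	return ib_post_send(qp, &wr, &bad_wr);
}
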
2401 /**
2402 * ib_destroy_cq - Destroys the specified CQ.
2403 * @cq: The CQ to destroy.
2404 */
2405 int ib_destroy_cq(struct ib_cq *cq);
2406
2407 /**
2408 * ib_poll_cq - poll a CQ for completion(s)
2409 * @cq: the CQ being polled
2410 * @num_entries: maximum number of completions to return
2411 * @wc: array of at least @num_entries &struct ib_wc where completions
2412 * will be returned
2413 *
2414 * Poll a CQ for (possibly multiple) completions. If the return value
2415 * is < 0, an error occurred. If the return value is >= 0, it is the
2416 * number of completions returned. If the return value is
2417 * non-negative and < num_entries, then the CQ was emptied.
2418 */
2419 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2420 struct ib_wc *wc)
2421 {
2422 return cq->device->poll_cq(cq, num_entries, wc);
2423 }
2424
2425 /**
2426 * ib_peek_cq - Returns the number of unreaped completions currently
2427 * on the specified CQ.
2428 * @cq: The CQ to peek.
2429 * @wc_cnt: A minimum number of unreaped completions to check for.
2430 *
2431 * If the number of unreaped completions is greater than or equal to wc_cnt,
2432 * this function returns wc_cnt, otherwise, it returns the actual number of
2433 * unreaped completions.
2434 */
2435 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2436
2437 /**
2438 * ib_req_notify_cq - Request completion notification on a CQ.
2439 * @cq: The CQ to generate an event for.
2440 * @flags:
2441 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
2442 * to request an event on the next solicited event or next work
2443 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
2444 * may also be |ed in to request a hint about missed events, as
2445 * described below.
2446 *
2447 * Return Value:
2448 * < 0 means an error occurred while requesting notification
2449 * == 0 means notification was requested successfully, and if
2450 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
2451 * were missed and it is safe to wait for another event. In
2452 * this case it is guaranteed that any work completions added
2453 * to the CQ since the last CQ poll will trigger a completion
2454 * notification event.
2455 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
2456 * in. It means that the consumer must poll the CQ again to
2457 * make sure it is empty to avoid missing an event because of a
2458 * race between requesting notification and an entry being
2459 * added to the CQ. This return value means it is possible
2460 * (but not guaranteed) that a work completion has been added
2461 * to the CQ since the last poll without triggering a
2462 * completion notification event.
2463 */
2464 static inline int ib_req_notify_cq(struct ib_cq *cq,
2465 enum ib_cq_notify_flags flags)
2466 {
2467 return cq->device->req_notify_cq(cq, flags);
2468 }
2469
2470 /**
2471 * ib_req_ncomp_notif - Request completion notification when there are
2472 * at least the specified number of unreaped completions on the CQ.
2473 * @cq: The CQ to generate an event for.
2474 * @wc_cnt: The number of unreaped completions that should be on the
2475 * CQ before an event is generated.
2476 */
2477 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2478 {
2479 return cq->device->req_ncomp_notif ?
2480 cq->device->req_ncomp_notif(cq, wc_cnt) :
2481 -ENOSYS;
2482 }
2483
2484 /**
2485 * ib_get_dma_mr - Returns a memory region for system memory that is
2486 * usable for DMA.
2487 * @pd: The protection domain associated with the memory region.
2488 * @mr_access_flags: Specifies the memory access rights.
2489 * 2490 * Note that the ib_dma_*() functions defined below must be used 2491 * to create/destroy addresses used with the Lkey or Rkey returned 2492 * by ib_get_dma_mr(). 2493 */ 2494 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 2495 2496 /** 2497 * ib_dma_mapping_error - check a DMA addr for error 2498 * @dev: The device for which the dma_addr was created 2499 * @dma_addr: The DMA address to check 2500 */ 2501 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 2502 { 2503 if (dev->dma_ops) 2504 return dev->dma_ops->mapping_error(dev, dma_addr); 2505 return dma_mapping_error(dev->dma_device, dma_addr); 2506 } 2507 2508 /** 2509 * ib_dma_map_single - Map a kernel virtual address to DMA address 2510 * @dev: The device for which the dma_addr is to be created 2511 * @cpu_addr: The kernel virtual address 2512 * @size: The size of the region in bytes 2513 * @direction: The direction of the DMA 2514 */ 2515 static inline u64 ib_dma_map_single(struct ib_device *dev, 2516 void *cpu_addr, size_t size, 2517 enum dma_data_direction direction) 2518 { 2519 if (dev->dma_ops) 2520 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 2521 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 2522 } 2523 2524 /** 2525 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 2526 * @dev: The device for which the DMA address was created 2527 * @addr: The DMA address 2528 * @size: The size of the region in bytes 2529 * @direction: The direction of the DMA 2530 */ 2531 static inline void ib_dma_unmap_single(struct ib_device *dev, 2532 u64 addr, size_t size, 2533 enum dma_data_direction direction) 2534 { 2535 if (dev->dma_ops) 2536 dev->dma_ops->unmap_single(dev, addr, size, direction); 2537 else 2538 dma_unmap_single(dev->dma_device, addr, size, direction); 2539 } 2540 2541 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 2542 void *cpu_addr, size_t size, 2543 enum dma_data_direction direction, 2544 struct dma_attrs *attrs) 2545 { 2546 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 2547 direction, attrs); 2548 } 2549 2550 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 2551 u64 addr, size_t size, 2552 enum dma_data_direction direction, 2553 struct dma_attrs *attrs) 2554 { 2555 return dma_unmap_single_attrs(dev->dma_device, addr, size, 2556 direction, attrs); 2557 } 2558 2559 /** 2560 * ib_dma_map_page - Map a physical page to DMA address 2561 * @dev: The device for which the dma_addr is to be created 2562 * @page: The page to be mapped 2563 * @offset: The offset within the page 2564 * @size: The size of the region in bytes 2565 * @direction: The direction of the DMA 2566 */ 2567 static inline u64 ib_dma_map_page(struct ib_device *dev, 2568 struct page *page, 2569 unsigned long offset, 2570 size_t size, 2571 enum dma_data_direction direction) 2572 { 2573 if (dev->dma_ops) 2574 return dev->dma_ops->map_page(dev, page, offset, size, direction); 2575 return dma_map_page(dev->dma_device, page, offset, size, direction); 2576 } 2577 2578 /** 2579 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 2580 * @dev: The device for which the DMA address was created 2581 * @addr: The DMA address 2582 * @size: The size of the region in bytes 2583 * @direction: The direction of the DMA 2584 */ 2585 static inline void ib_dma_unmap_page(struct ib_device *dev, 2586 u64 addr, size_t size, 2587 enum dma_data_direction direction) 2588 { 2589 if (dev->dma_ops) 2590 
dev->dma_ops->unmap_page(dev, addr, size, direction); 2591 else 2592 dma_unmap_page(dev->dma_device, addr, size, direction); 2593 } 2594 2595 /** 2596 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 2597 * @dev: The device for which the DMA addresses are to be created 2598 * @sg: The array of scatter/gather entries 2599 * @nents: The number of scatter/gather entries 2600 * @direction: The direction of the DMA 2601 */ 2602 static inline int ib_dma_map_sg(struct ib_device *dev, 2603 struct scatterlist *sg, int nents, 2604 enum dma_data_direction direction) 2605 { 2606 if (dev->dma_ops) 2607 return dev->dma_ops->map_sg(dev, sg, nents, direction); 2608 return dma_map_sg(dev->dma_device, sg, nents, direction); 2609 } 2610 2611 /** 2612 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 2613 * @dev: The device for which the DMA addresses were created 2614 * @sg: The array of scatter/gather entries 2615 * @nents: The number of scatter/gather entries 2616 * @direction: The direction of the DMA 2617 */ 2618 static inline void ib_dma_unmap_sg(struct ib_device *dev, 2619 struct scatterlist *sg, int nents, 2620 enum dma_data_direction direction) 2621 { 2622 if (dev->dma_ops) 2623 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 2624 else 2625 dma_unmap_sg(dev->dma_device, sg, nents, direction); 2626 } 2627 2628 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 2629 struct scatterlist *sg, int nents, 2630 enum dma_data_direction direction, 2631 struct dma_attrs *attrs) 2632 { 2633 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 2634 } 2635 2636 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 2637 struct scatterlist *sg, int nents, 2638 enum dma_data_direction direction, 2639 struct dma_attrs *attrs) 2640 { 2641 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 2642 } 2643 /** 2644 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 2645 * @dev: The device for which the DMA addresses were created 2646 * @sg: The scatter/gather entry 2647 * 2648 * Note: this function is obsolete. To do: change all occurrences of 2649 * ib_sg_dma_address() into sg_dma_address(). 2650 */ 2651 static inline u64 ib_sg_dma_address(struct ib_device *dev, 2652 struct scatterlist *sg) 2653 { 2654 return sg_dma_address(sg); 2655 } 2656 2657 /** 2658 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 2659 * @dev: The device for which the DMA addresses were created 2660 * @sg: The scatter/gather entry 2661 * 2662 * Note: this function is obsolete. To do: change all occurrences of 2663 * ib_sg_dma_len() into sg_dma_len(). 
2664 */
2665 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
2666 struct scatterlist *sg)
2667 {
2668 return sg_dma_len(sg);
2669 }
2670
2671 /**
2672 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
2673 * @dev: The device for which the DMA address was created
2674 * @addr: The DMA address
2675 * @size: The size of the region in bytes
2676 * @dir: The direction of the DMA
2677 */
2678 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
2679 u64 addr,
2680 size_t size,
2681 enum dma_data_direction dir)
2682 {
2683 if (dev->dma_ops)
2684 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
2685 else
2686 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2687 }
2688
2689 /**
2690 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
2691 * @dev: The device for which the DMA address was created
2692 * @addr: The DMA address
2693 * @size: The size of the region in bytes
2694 * @dir: The direction of the DMA
2695 */
2696 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
2697 u64 addr,
2698 size_t size,
2699 enum dma_data_direction dir)
2700 {
2701 if (dev->dma_ops)
2702 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
2703 else
2704 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2705 }
2706
2707 /**
2708 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
2709 * @dev: The device for which the DMA address is requested
2710 * @size: The size of the region to allocate in bytes
2711 * @dma_handle: A pointer for returning the DMA address of the region
2712 * @flag: memory allocator flags
2713 */
2714 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
2715 size_t size,
2716 u64 *dma_handle,
2717 gfp_t flag)
2718 {
2719 if (dev->dma_ops)
2720 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
2721 else {
2722 dma_addr_t handle;
2723 void *ret;
2724
2725 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
2726 *dma_handle = handle;
2727 return ret;
2728 }
2729 }
2730
2731 /**
2732 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
2733 * @dev: The device for which the DMA addresses were allocated
2734 * @size: The size of the region
2735 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
2736 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
2737 */
2738 static inline void ib_dma_free_coherent(struct ib_device *dev,
2739 size_t size, void *cpu_addr,
2740 u64 dma_handle)
2741 {
2742 if (dev->dma_ops)
2743 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
2744 else
2745 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
2746 }
2747
2748 /**
2749 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
2750 * by an HCA.
2751 * @pd: The protection domain assigned to the registered region.
2752 * @phys_buf_array: Specifies a list of physical buffers to use in the
2753 * memory region.
2754 * @num_phys_buf: Specifies the size of the phys_buf_array.
2755 * @mr_access_flags: Specifies the memory access rights.
2756 * @iova_start: The offset of the region's starting I/O virtual address.
2757 */
2758 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
2759 struct ib_phys_buf *phys_buf_array,
2760 int num_phys_buf,
2761 int mr_access_flags,
2762 u64 *iova_start);
2763
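/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): mapping a kernel buffer for device access with the ib_dma_*()
 * wrappers and checking the result. The helper name is hypothetical and the
 * convention of returning 0 on failure is only for this sketch.
 */
static inline u64 example_map_buffer_for_send(struct ib_device *dev,
					      void *buf, size_t len)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return 0;

	/* dma_addr can now be used as the addr field of a struct ib_sge. */
	return dma_addr;
}
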
2764 /**
2765 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
2766 * Conceptually, this call performs a deregister memory region operation
2767 * followed by a register physical memory region operation. Where possible,
2768 * resources are reused instead of deallocated and reallocated.
2769 * @mr: The memory region to modify.
2770 * @mr_rereg_mask: A bit-mask used to indicate which of the following
2771 * properties of the memory region are being modified.
2772 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
2773 * the new protection domain to associate with the memory region,
2774 * otherwise, this parameter is ignored.
2775 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2776 * field specifies a list of physical buffers to use in the new
2777 * translation, otherwise, this parameter is ignored.
2778 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2779 * field specifies the size of the phys_buf_array, otherwise, this
2780 * parameter is ignored.
2781 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
2782 * field specifies the new memory access rights, otherwise, this
2783 * parameter is ignored.
2784 * @iova_start: The offset of the region's starting I/O virtual address.
2785 */
2786 int ib_rereg_phys_mr(struct ib_mr *mr,
2787 int mr_rereg_mask,
2788 struct ib_pd *pd,
2789 struct ib_phys_buf *phys_buf_array,
2790 int num_phys_buf,
2791 int mr_access_flags,
2792 u64 *iova_start);
2793
2794 /**
2795 * ib_query_mr - Retrieves information about a specific memory region.
2796 * @mr: The memory region to retrieve information about.
2797 * @mr_attr: The attributes of the specified memory region.
2798 */
2799 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2800
2801 /**
2802 * ib_dereg_mr - Deregisters a memory region and removes it from the
2803 * HCA translation table.
2804 * @mr: The memory region to deregister.
2805 *
2806 * This function can fail if the memory region has memory windows bound to it.
2807 */
2808 int ib_dereg_mr(struct ib_mr *mr);
2809
2810
2811 /**
2812 * ib_create_mr - Allocates a memory region that may be used for
2813 * signature handover operations.
2814 * @pd: The protection domain associated with the region.
2815 * @mr_init_attr: memory region init attributes.
2816 */
2817 struct ib_mr *ib_create_mr(struct ib_pd *pd,
2818 struct ib_mr_init_attr *mr_init_attr);
2819
2820 /**
2821 * ib_destroy_mr - Destroys a memory region that was created using
2822 * ib_create_mr and removes it from HW translation tables.
2823 * @mr: The memory region to destroy.
2824 *
2825 * This function can fail if the memory region has memory windows bound to it.
2826 */
2827 int ib_destroy_mr(struct ib_mr *mr);
2828
2829 /**
2830 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
2831 * IB_WR_FAST_REG_MR send work request.
2832 * @pd: The protection domain associated with the region.
2833 * @max_page_list_len: requested max physical buffer list length to be
2834 * used with fast register work requests for this MR.
2835 */
2836 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2837
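/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): like most verbs allocation calls, ib_alloc_fast_reg_mr() returns
 * an ERR_PTR() encoded error rather than NULL on failure. The helper name
 * is hypothetical.
 */
static inline int example_alloc_frmr(struct ib_pd *pd, int npages,
				     struct ib_mr **mr_out)
{
	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, npages);

	if (IS_ERR(mr))
		return PTR_ERR(mr);

	*mr_out = mr;
	return 0;
}
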
2838 /**
2839 * ib_alloc_fast_reg_page_list - Allocates a page list array
2840 * @device - ib device pointer.
2841 * @page_list_len - size of the page list array to be allocated.
2842 *
2843 * This allocates and returns a struct ib_fast_reg_page_list * and a
2844 * page_list array that is at least page_list_len in size. The actual
2845 * size is returned in max_page_list_len. The caller is responsible
2846 * for initializing the contents of the page_list array before posting
2847 * a send work request with the IB_WR_FAST_REG_MR opcode.
2848 *
2849 * The page_list array entries must be translated using one of the
2850 * ib_dma_*() functions just like the addresses passed to
2851 * ib_map_phys_fmr(). Once the ib_post_send() is issued, the struct
2852 * ib_fast_reg_page_list must not be modified by the caller until the
2853 * IB_WC_FAST_REG_MR work request completes.
2854 */
2855 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
2856 struct ib_device *device, int page_list_len);
2857
2858 /**
2859 * ib_free_fast_reg_page_list - Deallocates a previously allocated
2860 * page list array.
2861 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
2862 */
2863 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2864
2865 /**
2866 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
2867 * R_Key and L_Key.
2868 * @mr - struct ib_mr pointer to be updated.
2869 * @newkey - new key to be used.
2870 */
2871 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2872 {
2873 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2874 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2875 }
2876
2877 /**
2878 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
2879 * for calculating a new rkey for type 2 memory windows.
2880 * @rkey - the rkey to increment.
2881 */
2882 static inline u32 ib_inc_rkey(u32 rkey)
2883 {
2884 const u32 mask = 0x000000ff;
2885 return ((rkey + 1) & mask) | (rkey & ~mask);
2886 }
2887
2888 /**
2889 * ib_alloc_mw - Allocates a memory window.
2890 * @pd: The protection domain associated with the memory window.
2891 * @type: The type of the memory window (1 or 2).
2892 */
2893 struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
2894
2895 /**
2896 * ib_bind_mw - Posts a work request to the send queue of the specified
2897 * QP, which binds the memory window to the given address range and
2898 * remote access attributes.
2899 * @qp: QP to post the bind work request on.
2900 * @mw: The memory window to bind.
2901 * @mw_bind: Specifies information about the memory window, including
2902 * its address range, remote access rights, and associated memory region.
2903 *
2904 * If there is no immediate error, the function will update the rkey member
2905 * of the mw parameter to its new value. The bind operation can still fail
2906 * asynchronously.
2907 */
2908 static inline int ib_bind_mw(struct ib_qp *qp,
2909 struct ib_mw *mw,
2910 struct ib_mw_bind *mw_bind)
2911 {
2912 /* XXX reference counting in corresponding MR? */
2913 return mw->device->bind_mw ?
2914 mw->device->bind_mw(qp, mw, mw_bind) :
2915 -ENOSYS;
2916 }
2917
2918 /**
2919 * ib_dealloc_mw - Deallocates a memory window.
2920 * @mw: The memory window to deallocate.
2921 */
2922 int ib_dealloc_mw(struct ib_mw *mw);
2923
2924 /**
2925 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2926 * @pd: The protection domain associated with the unmapped region.
2927 * @mr_access_flags: Specifies the memory access rights.
2928 * @fmr_attr: Attributes of the unmapped region.
2929 *
2930 * A fast memory region must be mapped before it can be used as part of
2931 * a work request.
2932 */
2933 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2934 int mr_access_flags,
2935 struct ib_fmr_attr *fmr_attr);
2936
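/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): producing a fresh R_Key before re-binding a type 2 memory window.
 * The helper name is hypothetical; the returned value would be supplied in
 * the subsequent bind work request.
 */
static inline u32 example_next_mw_rkey(struct ib_mw *mw)
{
	/* Only the low eight bits (the consumer-owned key portion) change. */
	return ib_inc_rkey(mw->rkey);
}
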
2937 /**
2938 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2939 * @fmr: The fast memory region to associate with the pages.
2940 * @page_list: An array of physical pages to map to the fast memory region.
2941 * @list_len: The number of pages in page_list.
2942 * @iova: The I/O virtual address to use with the mapped region.
2943 */
2944 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2945 u64 *page_list, int list_len,
2946 u64 iova)
2947 {
2948 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2949 }
2950
2951 /**
2952 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2953 * @fmr_list: A linked list of fast memory regions to unmap.
2954 */
2955 int ib_unmap_fmr(struct list_head *fmr_list);
2956
2957 /**
2958 * ib_dealloc_fmr - Deallocates a fast memory region.
2959 * @fmr: The fast memory region to deallocate.
2960 */
2961 int ib_dealloc_fmr(struct ib_fmr *fmr);
2962
2963 /**
2964 * ib_attach_mcast - Attaches the specified QP to a multicast group.
2965 * @qp: QP to attach to the multicast group. The QP must be type
2966 * IB_QPT_UD.
2967 * @gid: Multicast group GID.
2968 * @lid: Multicast group LID in host byte order.
2969 *
2970 * In order to send and receive multicast packets, subnet
2971 * administration must have created the multicast group and configured
2972 * the fabric appropriately. The port associated with the specified
2973 * QP must also be a member of the multicast group.
2974 */
2975 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2976
2977 /**
2978 * ib_detach_mcast - Detaches the specified QP from a multicast group.
2979 * @qp: QP to detach from the multicast group.
2980 * @gid: Multicast group GID.
2981 * @lid: Multicast group LID in host byte order.
2982 */
2983 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2984
2985 /**
2986 * ib_alloc_xrcd - Allocates an XRC domain.
2987 * @device: The device on which to allocate the XRC domain.
2988 */
2989 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2990
2991 /**
2992 * ib_dealloc_xrcd - Deallocates an XRC domain.
2993 * @xrcd: The XRC domain to deallocate.
2994 */
2995 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
2996
2997 struct ib_flow *ib_create_flow(struct ib_qp *qp,
2998 struct ib_flow_attr *flow_attr, int domain);
2999 int ib_destroy_flow(struct ib_flow *flow_id);
3000
3001 static inline int ib_check_mr_access(int flags)
3002 {
3003 /*
3004 * Local write permission is required if remote write or
3005 * remote atomic permission is also requested.
3006 */
3007 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3008 !(flags & IB_ACCESS_LOCAL_WRITE))
3009 return -EINVAL;
3010
3011 return 0;
3012 }
3013
3014 /**
3015 * ib_check_mr_status: lightweight check of MR status.
3016 * This routine may provide status checks on a selected
3017 * ib_mr. The first use is for signature status checks.
3018 *
3019 * @mr: A memory region.
3020 * @check_mask: Bitmask of which checks to perform from
3021 * ib_mr_status_check enumeration.
3022 * @mr_status: The container of relevant status checks.
3023 * Failed checks will be indicated in the status bitmask
3024 * and the relevant info shall be in the error item.
3025 */
3026 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3027 struct ib_mr_status *mr_status);
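/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): the common "drain, re-arm, drain again" loop built from
 * ib_poll_cq() and ib_req_notify_cq(). The helper name and fixed batch size
 * are hypothetical; real consumers would dispatch on each wc's opcode and
 * status instead of just counting completions.
 */
static inline int example_drain_and_rearm_cq(struct ib_cq *cq)
{
	struct ib_wc wc[8];
	int n, ret, total = 0;

	do {
		/* Reap everything currently queued on the CQ. */
		while ((n = ib_poll_cq(cq, 8, wc)) > 0)
			total += n;
		if (n < 0)
			return n;
		/*
		 * Re-arm the CQ. A positive return value means completions
		 * may have raced in between the final poll and the re-arm,
		 * so poll once more before going to sleep.
		 */
		ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					   IB_CQ_REPORT_MISSED_EVENTS);
		if (ret < 0)
			return ret;
	} while (ret > 0);

	return total;
}

3028
3029 #endif /* IB_VERBS_H */
3030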