1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #if !defined(IB_VERBS_H) 40 #define IB_VERBS_H 41 42 #include <linux/types.h> 43 #include <linux/device.h> 44 #include <linux/mm.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/kref.h> 47 #include <linux/list.h> 48 #include <linux/rwsem.h> 49 #include <linux/scatterlist.h> 50 #include <linux/workqueue.h> 51 #include <uapi/linux/if_ether.h> 52 53 #include <linux/atomic.h> 54 #include <asm/uaccess.h> 55 56 extern struct workqueue_struct *ib_wq; 57 58 union ib_gid { 59 u8 raw[16]; 60 struct { 61 __be64 subnet_prefix; 62 __be64 interface_id; 63 } global; 64 }; 65 66 enum rdma_node_type { 67 /* IB values map to NodeInfo:NodeType. */ 68 RDMA_NODE_IB_CA = 1, 69 RDMA_NODE_IB_SWITCH, 70 RDMA_NODE_IB_ROUTER, 71 RDMA_NODE_RNIC, 72 RDMA_NODE_USNIC, 73 RDMA_NODE_USNIC_UDP, 74 }; 75 76 enum rdma_transport_type { 77 RDMA_TRANSPORT_IB, 78 RDMA_TRANSPORT_IWARP, 79 RDMA_TRANSPORT_USNIC, 80 RDMA_TRANSPORT_USNIC_UDP 81 }; 82 83 enum rdma_transport_type 84 rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__; 85 86 enum rdma_link_layer { 87 IB_LINK_LAYER_UNSPECIFIED, 88 IB_LINK_LAYER_INFINIBAND, 89 IB_LINK_LAYER_ETHERNET, 90 }; 91 92 enum ib_device_cap_flags { 93 IB_DEVICE_RESIZE_MAX_WR = 1, 94 IB_DEVICE_BAD_PKEY_CNTR = (1<<1), 95 IB_DEVICE_BAD_QKEY_CNTR = (1<<2), 96 IB_DEVICE_RAW_MULTI = (1<<3), 97 IB_DEVICE_AUTO_PATH_MIG = (1<<4), 98 IB_DEVICE_CHANGE_PHY_PORT = (1<<5), 99 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6), 100 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7), 101 IB_DEVICE_SHUTDOWN_PORT = (1<<8), 102 IB_DEVICE_INIT_TYPE = (1<<9), 103 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10), 104 IB_DEVICE_SYS_IMAGE_GUID = (1<<11), 105 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12), 106 IB_DEVICE_SRQ_RESIZE = (1<<13), 107 IB_DEVICE_N_NOTIFY_CQ = (1<<14), 108 IB_DEVICE_LOCAL_DMA_LKEY = (1<<15), 109 IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */ 110 IB_DEVICE_MEM_WINDOW = (1<<17), 111 /* 112 * Devices should set IB_DEVICE_UD_IP_SUM if they support 113 * insertion of UDP and TCP checksum on outgoing UD IPoIB 114 * messages and can verify the validity of checksum for 115 * incoming messages. Setting this flag implies that the 116 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 117 */ 118 IB_DEVICE_UD_IP_CSUM = (1<<18), 119 IB_DEVICE_UD_TSO = (1<<19), 120 IB_DEVICE_XRC = (1<<20), 121 IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), 122 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), 123 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), 124 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), 125 IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29), 126 IB_DEVICE_SIGNATURE_HANDOVER = (1<<30) 127 }; 128 129 enum ib_signature_prot_cap { 130 IB_PROT_T10DIF_TYPE_1 = 1, 131 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 132 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 133 }; 134 135 enum ib_signature_guard_cap { 136 IB_GUARD_T10DIF_CRC = 1, 137 IB_GUARD_T10DIF_CSUM = 1 << 1, 138 }; 139 140 enum ib_atomic_cap { 141 IB_ATOMIC_NONE, 142 IB_ATOMIC_HCA, 143 IB_ATOMIC_GLOB 144 }; 145 146 struct ib_device_attr { 147 u64 fw_ver; 148 __be64 sys_image_guid; 149 u64 max_mr_size; 150 u64 page_size_cap; 151 u32 vendor_id; 152 u32 vendor_part_id; 153 u32 hw_ver; 154 int max_qp; 155 int max_qp_wr; 156 int device_cap_flags; 157 int max_sge; 158 int max_sge_rd; 159 int max_cq; 160 int max_cqe; 161 int max_mr; 162 int max_pd; 163 int max_qp_rd_atom; 164 int max_ee_rd_atom; 165 int max_res_rd_atom; 166 int max_qp_init_rd_atom; 167 int max_ee_init_rd_atom; 168 enum ib_atomic_cap atomic_cap; 169 enum ib_atomic_cap masked_atomic_cap; 170 int max_ee; 171 int max_rdd; 172 int max_mw; 173 int max_raw_ipv6_qp; 174 int max_raw_ethy_qp; 175 int max_mcast_grp; 176 int max_mcast_qp_attach; 177 int max_total_mcast_qp_attach; 178 int max_ah; 179 int max_fmr; 180 int max_map_per_fmr; 181 int max_srq; 182 int max_srq_wr; 183 int max_srq_sge; 184 unsigned int max_fast_reg_page_list_len; 185 u16 max_pkeys; 186 u8 local_ca_ack_delay; 187 int sig_prot_cap; 188 int sig_guard_cap; 189 }; 190 191 enum ib_mtu { 192 IB_MTU_256 = 1, 193 IB_MTU_512 = 2, 194 IB_MTU_1024 = 3, 195 IB_MTU_2048 = 4, 196 IB_MTU_4096 = 5 197 }; 198 199 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 200 { 201 switch (mtu) { 202 case IB_MTU_256: return 256; 203 case IB_MTU_512: return 512; 204 case IB_MTU_1024: return 1024; 205 case IB_MTU_2048: return 2048; 206 case IB_MTU_4096: return 4096; 207 default: return -1; 208 } 209 } 210 211 enum ib_port_state { 212 IB_PORT_NOP = 0, 213 IB_PORT_DOWN = 1, 214 IB_PORT_INIT = 2, 215 IB_PORT_ARMED = 3, 216 IB_PORT_ACTIVE = 4, 217 IB_PORT_ACTIVE_DEFER = 5 218 }; 219 220 enum ib_port_cap_flags { 221 IB_PORT_SM = 1 << 1, 222 IB_PORT_NOTICE_SUP = 1 << 2, 223 IB_PORT_TRAP_SUP = 1 << 3, 224 IB_PORT_OPT_IPD_SUP = 1 << 4, 225 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 226 IB_PORT_SL_MAP_SUP = 1 << 6, 227 IB_PORT_MKEY_NVRAM = 1 << 7, 228 IB_PORT_PKEY_NVRAM = 1 << 8, 229 IB_PORT_LED_INFO_SUP = 1 << 9, 230 IB_PORT_SM_DISABLED = 1 << 10, 231 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, 232 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, 233 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, 234 IB_PORT_CM_SUP = 1 << 16, 235 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, 236 IB_PORT_REINIT_SUP = 1 << 18, 237 IB_PORT_DEVICE_MGMT_SUP = 1 << 19, 238 IB_PORT_VENDOR_CLASS_SUP = 1 << 20, 239 IB_PORT_DR_NOTICE_SUP = 1 << 21, 240 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 241 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 242 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 243 IB_PORT_CLIENT_REG_SUP = 1 << 25, 244 IB_PORT_IP_BASED_GIDS = 1 << 26 245 }; 246 247 enum ib_port_width { 248 IB_WIDTH_1X = 1, 249 IB_WIDTH_4X = 2, 250 IB_WIDTH_8X = 4, 251 IB_WIDTH_12X = 8 252 }; 253 254 static inline int ib_width_enum_to_int(enum ib_port_width width) 255 { 256 switch (width) { 257 case IB_WIDTH_1X: return 1; 258 case IB_WIDTH_4X: return 4; 259 case IB_WIDTH_8X: return 8; 260 case IB_WIDTH_12X: return 12; 261 default: return -1; 262 } 263 } 264 265 enum ib_port_speed { 266 IB_SPEED_SDR = 1, 267 IB_SPEED_DDR = 2, 268 IB_SPEED_QDR = 4, 269 IB_SPEED_FDR10 = 8, 270 IB_SPEED_FDR = 16, 271 IB_SPEED_EDR = 32 272 }; 273 274 struct ib_protocol_stats { 275 /* TBD... */ 276 }; 277 278 struct iw_protocol_stats { 279 u64 ipInReceives; 280 u64 ipInHdrErrors; 281 u64 ipInTooBigErrors; 282 u64 ipInNoRoutes; 283 u64 ipInAddrErrors; 284 u64 ipInUnknownProtos; 285 u64 ipInTruncatedPkts; 286 u64 ipInDiscards; 287 u64 ipInDelivers; 288 u64 ipOutForwDatagrams; 289 u64 ipOutRequests; 290 u64 ipOutDiscards; 291 u64 ipOutNoRoutes; 292 u64 ipReasmTimeout; 293 u64 ipReasmReqds; 294 u64 ipReasmOKs; 295 u64 ipReasmFails; 296 u64 ipFragOKs; 297 u64 ipFragFails; 298 u64 ipFragCreates; 299 u64 ipInMcastPkts; 300 u64 ipOutMcastPkts; 301 u64 ipInBcastPkts; 302 u64 ipOutBcastPkts; 303 304 u64 tcpRtoAlgorithm; 305 u64 tcpRtoMin; 306 u64 tcpRtoMax; 307 u64 tcpMaxConn; 308 u64 tcpActiveOpens; 309 u64 tcpPassiveOpens; 310 u64 tcpAttemptFails; 311 u64 tcpEstabResets; 312 u64 tcpCurrEstab; 313 u64 tcpInSegs; 314 u64 tcpOutSegs; 315 u64 tcpRetransSegs; 316 u64 tcpInErrs; 317 u64 tcpOutRsts; 318 }; 319 320 union rdma_protocol_stats { 321 struct ib_protocol_stats ib; 322 struct iw_protocol_stats iw; 323 }; 324 325 struct ib_port_attr { 326 enum ib_port_state state; 327 enum ib_mtu max_mtu; 328 enum ib_mtu active_mtu; 329 int gid_tbl_len; 330 u32 port_cap_flags; 331 u32 max_msg_sz; 332 u32 bad_pkey_cntr; 333 u32 qkey_viol_cntr; 334 u16 pkey_tbl_len; 335 u16 lid; 336 u16 sm_lid; 337 u8 lmc; 338 u8 max_vl_num; 339 u8 sm_sl; 340 u8 subnet_timeout; 341 u8 init_type_reply; 342 u8 active_width; 343 u8 active_speed; 344 u8 phys_state; 345 }; 346 347 enum ib_device_modify_flags { 348 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 349 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 350 }; 351 352 struct ib_device_modify { 353 u64 sys_image_guid; 354 char node_desc[64]; 355 }; 356 357 enum ib_port_modify_flags { 358 IB_PORT_SHUTDOWN = 1, 359 IB_PORT_INIT_TYPE = (1<<2), 360 IB_PORT_RESET_QKEY_CNTR = (1<<3) 361 }; 362 363 struct ib_port_modify { 364 u32 set_port_cap_mask; 365 u32 clr_port_cap_mask; 366 u8 init_type; 367 }; 368 369 enum ib_event_type { 370 IB_EVENT_CQ_ERR, 371 IB_EVENT_QP_FATAL, 372 IB_EVENT_QP_REQ_ERR, 373 IB_EVENT_QP_ACCESS_ERR, 374 IB_EVENT_COMM_EST, 375 IB_EVENT_SQ_DRAINED, 376 IB_EVENT_PATH_MIG, 377 IB_EVENT_PATH_MIG_ERR, 378 IB_EVENT_DEVICE_FATAL, 379 IB_EVENT_PORT_ACTIVE, 380 IB_EVENT_PORT_ERR, 381 IB_EVENT_LID_CHANGE, 382 IB_EVENT_PKEY_CHANGE, 383 IB_EVENT_SM_CHANGE, 384 IB_EVENT_SRQ_ERR, 385 IB_EVENT_SRQ_LIMIT_REACHED, 386 IB_EVENT_QP_LAST_WQE_REACHED, 387 IB_EVENT_CLIENT_REREGISTER, 388 IB_EVENT_GID_CHANGE, 389 }; 390 391 struct ib_event { 392 struct ib_device *device; 393 union { 394 struct ib_cq *cq; 395 struct ib_qp *qp; 396 struct ib_srq *srq; 397 u8 port_num; 398 } element; 399 enum ib_event_type event; 400 }; 401 402 struct ib_event_handler { 403 struct ib_device *device; 404 void (*handler)(struct ib_event_handler *, struct ib_event *); 405 struct list_head list; 406 }; 407 408 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 409 do { \ 410 (_ptr)->device = _device; \ 411 (_ptr)->handler = _handler; \ 412 INIT_LIST_HEAD(&(_ptr)->list); \ 413 } while (0) 414 415 struct ib_global_route { 416 union ib_gid dgid; 417 u32 flow_label; 418 u8 sgid_index; 419 u8 hop_limit; 420 u8 traffic_class; 421 }; 422 423 struct ib_grh { 424 __be32 version_tclass_flow; 425 __be16 paylen; 426 u8 next_hdr; 427 u8 hop_limit; 428 union ib_gid sgid; 429 union ib_gid dgid; 430 }; 431 432 enum { 433 IB_MULTICAST_QPN = 0xffffff 434 }; 435 436 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 437 438 enum ib_ah_flags { 439 IB_AH_GRH = 1 440 }; 441 442 enum ib_rate { 443 IB_RATE_PORT_CURRENT = 0, 444 IB_RATE_2_5_GBPS = 2, 445 IB_RATE_5_GBPS = 5, 446 IB_RATE_10_GBPS = 3, 447 IB_RATE_20_GBPS = 6, 448 IB_RATE_30_GBPS = 4, 449 IB_RATE_40_GBPS = 7, 450 IB_RATE_60_GBPS = 8, 451 IB_RATE_80_GBPS = 9, 452 IB_RATE_120_GBPS = 10, 453 IB_RATE_14_GBPS = 11, 454 IB_RATE_56_GBPS = 12, 455 IB_RATE_112_GBPS = 13, 456 IB_RATE_168_GBPS = 14, 457 IB_RATE_25_GBPS = 15, 458 IB_RATE_100_GBPS = 16, 459 IB_RATE_200_GBPS = 17, 460 IB_RATE_300_GBPS = 18 461 }; 462 463 /** 464 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 465 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 466 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 467 * @rate: rate to convert. 468 */ 469 int ib_rate_to_mult(enum ib_rate rate) __attribute_const__; 470 471 /** 472 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 473 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 474 * @rate: rate to convert. 475 */ 476 int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__; 477 478 enum ib_mr_create_flags { 479 IB_MR_SIGNATURE_EN = 1, 480 }; 481 482 /** 483 * ib_mr_init_attr - Memory region init attributes passed to routine 484 * ib_create_mr. 485 * @max_reg_descriptors: max number of registration descriptors that 486 * may be used with registration work requests. 487 * @flags: MR creation flags bit mask. 488 */ 489 struct ib_mr_init_attr { 490 int max_reg_descriptors; 491 u32 flags; 492 }; 493 494 enum ib_signature_type { 495 IB_SIG_TYPE_T10_DIF, 496 }; 497 498 /** 499 * T10-DIF Signature types 500 * T10-DIF types are defined by SCSI 501 * specifications. 502 */ 503 enum ib_t10_dif_type { 504 IB_T10DIF_NONE, 505 IB_T10DIF_TYPE1, 506 IB_T10DIF_TYPE2, 507 IB_T10DIF_TYPE3 508 }; 509 510 /** 511 * Signature T10-DIF block-guard types 512 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. 513 * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 514 */ 515 enum ib_t10_dif_bg_type { 516 IB_T10DIF_CRC, 517 IB_T10DIF_CSUM 518 }; 519 520 /** 521 * struct ib_t10_dif_domain - Parameters specific for T10-DIF 522 * domain. 523 * @type: T10-DIF type (0|1|2|3) 524 * @bg_type: T10-DIF block guard type (CRC|CSUM) 525 * @pi_interval: protection information interval. 526 * @bg: seed of guard computation. 527 * @app_tag: application tag of guard block 528 * @ref_tag: initial guard block reference tag. 529 * @type3_inc_reftag: T10-DIF type 3 does not state 530 * about the reference tag, it is the user 531 * choice to increment it or not. 532 */ 533 struct ib_t10_dif_domain { 534 enum ib_t10_dif_type type; 535 enum ib_t10_dif_bg_type bg_type; 536 u16 pi_interval; 537 u16 bg; 538 u16 app_tag; 539 u32 ref_tag; 540 bool type3_inc_reftag; 541 }; 542 543 /** 544 * struct ib_sig_domain - Parameters for signature domain 545 * @sig_type: specific signauture type 546 * @sig: union of all signature domain attributes that may 547 * be used to set domain layout. 548 */ 549 struct ib_sig_domain { 550 enum ib_signature_type sig_type; 551 union { 552 struct ib_t10_dif_domain dif; 553 } sig; 554 }; 555 556 /** 557 * struct ib_sig_attrs - Parameters for signature handover operation 558 * @check_mask: bitmask for signature byte check (8 bytes) 559 * @mem: memory domain layout desciptor. 560 * @wire: wire domain layout desciptor. 561 */ 562 struct ib_sig_attrs { 563 u8 check_mask; 564 struct ib_sig_domain mem; 565 struct ib_sig_domain wire; 566 }; 567 568 enum ib_sig_err_type { 569 IB_SIG_BAD_GUARD, 570 IB_SIG_BAD_REFTAG, 571 IB_SIG_BAD_APPTAG, 572 }; 573 574 /** 575 * struct ib_sig_err - signature error descriptor 576 */ 577 struct ib_sig_err { 578 enum ib_sig_err_type err_type; 579 u32 expected; 580 u32 actual; 581 u64 sig_err_offset; 582 u32 key; 583 }; 584 585 enum ib_mr_status_check { 586 IB_MR_CHECK_SIG_STATUS = 1, 587 }; 588 589 /** 590 * struct ib_mr_status - Memory region status container 591 * 592 * @fail_status: Bitmask of MR checks status. For each 593 * failed check a corresponding status bit is set. 594 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS 595 * failure. 596 */ 597 struct ib_mr_status { 598 u32 fail_status; 599 struct ib_sig_err sig_err; 600 }; 601 602 /** 603 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate 604 * enum. 605 * @mult: multiple to convert. 606 */ 607 enum ib_rate mult_to_ib_rate(int mult) __attribute_const__; 608 609 struct ib_ah_attr { 610 struct ib_global_route grh; 611 u16 dlid; 612 u8 sl; 613 u8 src_path_bits; 614 u8 static_rate; 615 u8 ah_flags; 616 u8 port_num; 617 u8 dmac[ETH_ALEN]; 618 u16 vlan_id; 619 }; 620 621 enum ib_wc_status { 622 IB_WC_SUCCESS, 623 IB_WC_LOC_LEN_ERR, 624 IB_WC_LOC_QP_OP_ERR, 625 IB_WC_LOC_EEC_OP_ERR, 626 IB_WC_LOC_PROT_ERR, 627 IB_WC_WR_FLUSH_ERR, 628 IB_WC_MW_BIND_ERR, 629 IB_WC_BAD_RESP_ERR, 630 IB_WC_LOC_ACCESS_ERR, 631 IB_WC_REM_INV_REQ_ERR, 632 IB_WC_REM_ACCESS_ERR, 633 IB_WC_REM_OP_ERR, 634 IB_WC_RETRY_EXC_ERR, 635 IB_WC_RNR_RETRY_EXC_ERR, 636 IB_WC_LOC_RDD_VIOL_ERR, 637 IB_WC_REM_INV_RD_REQ_ERR, 638 IB_WC_REM_ABORT_ERR, 639 IB_WC_INV_EECN_ERR, 640 IB_WC_INV_EEC_STATE_ERR, 641 IB_WC_FATAL_ERR, 642 IB_WC_RESP_TIMEOUT_ERR, 643 IB_WC_GENERAL_ERR 644 }; 645 646 enum ib_wc_opcode { 647 IB_WC_SEND, 648 IB_WC_RDMA_WRITE, 649 IB_WC_RDMA_READ, 650 IB_WC_COMP_SWAP, 651 IB_WC_FETCH_ADD, 652 IB_WC_BIND_MW, 653 IB_WC_LSO, 654 IB_WC_LOCAL_INV, 655 IB_WC_FAST_REG_MR, 656 IB_WC_MASKED_COMP_SWAP, 657 IB_WC_MASKED_FETCH_ADD, 658 /* 659 * Set value of IB_WC_RECV so consumers can test if a completion is a 660 * receive by testing (opcode & IB_WC_RECV). 661 */ 662 IB_WC_RECV = 1 << 7, 663 IB_WC_RECV_RDMA_WITH_IMM 664 }; 665 666 enum ib_wc_flags { 667 IB_WC_GRH = 1, 668 IB_WC_WITH_IMM = (1<<1), 669 IB_WC_WITH_INVALIDATE = (1<<2), 670 IB_WC_IP_CSUM_OK = (1<<3), 671 IB_WC_WITH_SMAC = (1<<4), 672 IB_WC_WITH_VLAN = (1<<5), 673 }; 674 675 struct ib_wc { 676 u64 wr_id; 677 enum ib_wc_status status; 678 enum ib_wc_opcode opcode; 679 u32 vendor_err; 680 u32 byte_len; 681 struct ib_qp *qp; 682 union { 683 __be32 imm_data; 684 u32 invalidate_rkey; 685 } ex; 686 u32 src_qp; 687 int wc_flags; 688 u16 pkey_index; 689 u16 slid; 690 u8 sl; 691 u8 dlid_path_bits; 692 u8 port_num; /* valid only for DR SMPs on switches */ 693 u8 smac[ETH_ALEN]; 694 u16 vlan_id; 695 }; 696 697 enum ib_cq_notify_flags { 698 IB_CQ_SOLICITED = 1 << 0, 699 IB_CQ_NEXT_COMP = 1 << 1, 700 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 701 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 702 }; 703 704 enum ib_srq_type { 705 IB_SRQT_BASIC, 706 IB_SRQT_XRC 707 }; 708 709 enum ib_srq_attr_mask { 710 IB_SRQ_MAX_WR = 1 << 0, 711 IB_SRQ_LIMIT = 1 << 1, 712 }; 713 714 struct ib_srq_attr { 715 u32 max_wr; 716 u32 max_sge; 717 u32 srq_limit; 718 }; 719 720 struct ib_srq_init_attr { 721 void (*event_handler)(struct ib_event *, void *); 722 void *srq_context; 723 struct ib_srq_attr attr; 724 enum ib_srq_type srq_type; 725 726 union { 727 struct { 728 struct ib_xrcd *xrcd; 729 struct ib_cq *cq; 730 } xrc; 731 } ext; 732 }; 733 734 struct ib_qp_cap { 735 u32 max_send_wr; 736 u32 max_recv_wr; 737 u32 max_send_sge; 738 u32 max_recv_sge; 739 u32 max_inline_data; 740 }; 741 742 enum ib_sig_type { 743 IB_SIGNAL_ALL_WR, 744 IB_SIGNAL_REQ_WR 745 }; 746 747 enum ib_qp_type { 748 /* 749 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 750 * here (and in that order) since the MAD layer uses them as 751 * indices into a 2-entry table. 752 */ 753 IB_QPT_SMI, 754 IB_QPT_GSI, 755 756 IB_QPT_RC, 757 IB_QPT_UC, 758 IB_QPT_UD, 759 IB_QPT_RAW_IPV6, 760 IB_QPT_RAW_ETHERTYPE, 761 IB_QPT_RAW_PACKET = 8, 762 IB_QPT_XRC_INI = 9, 763 IB_QPT_XRC_TGT, 764 IB_QPT_MAX, 765 /* Reserve a range for qp types internal to the low level driver. 766 * These qp types will not be visible at the IB core layer, so the 767 * IB_QPT_MAX usages should not be affected in the core layer 768 */ 769 IB_QPT_RESERVED1 = 0x1000, 770 IB_QPT_RESERVED2, 771 IB_QPT_RESERVED3, 772 IB_QPT_RESERVED4, 773 IB_QPT_RESERVED5, 774 IB_QPT_RESERVED6, 775 IB_QPT_RESERVED7, 776 IB_QPT_RESERVED8, 777 IB_QPT_RESERVED9, 778 IB_QPT_RESERVED10, 779 }; 780 781 enum ib_qp_create_flags { 782 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 783 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 784 IB_QP_CREATE_NETIF_QP = 1 << 5, 785 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 786 /* reserve bits 26-31 for low level drivers' internal use */ 787 IB_QP_CREATE_RESERVED_START = 1 << 26, 788 IB_QP_CREATE_RESERVED_END = 1 << 31, 789 }; 790 791 792 /* 793 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 794 * callback to destroy the passed in QP. 795 */ 796 797 struct ib_qp_init_attr { 798 void (*event_handler)(struct ib_event *, void *); 799 void *qp_context; 800 struct ib_cq *send_cq; 801 struct ib_cq *recv_cq; 802 struct ib_srq *srq; 803 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 804 struct ib_qp_cap cap; 805 enum ib_sig_type sq_sig_type; 806 enum ib_qp_type qp_type; 807 enum ib_qp_create_flags create_flags; 808 u8 port_num; /* special QP types only */ 809 }; 810 811 struct ib_qp_open_attr { 812 void (*event_handler)(struct ib_event *, void *); 813 void *qp_context; 814 u32 qp_num; 815 enum ib_qp_type qp_type; 816 }; 817 818 enum ib_rnr_timeout { 819 IB_RNR_TIMER_655_36 = 0, 820 IB_RNR_TIMER_000_01 = 1, 821 IB_RNR_TIMER_000_02 = 2, 822 IB_RNR_TIMER_000_03 = 3, 823 IB_RNR_TIMER_000_04 = 4, 824 IB_RNR_TIMER_000_06 = 5, 825 IB_RNR_TIMER_000_08 = 6, 826 IB_RNR_TIMER_000_12 = 7, 827 IB_RNR_TIMER_000_16 = 8, 828 IB_RNR_TIMER_000_24 = 9, 829 IB_RNR_TIMER_000_32 = 10, 830 IB_RNR_TIMER_000_48 = 11, 831 IB_RNR_TIMER_000_64 = 12, 832 IB_RNR_TIMER_000_96 = 13, 833 IB_RNR_TIMER_001_28 = 14, 834 IB_RNR_TIMER_001_92 = 15, 835 IB_RNR_TIMER_002_56 = 16, 836 IB_RNR_TIMER_003_84 = 17, 837 IB_RNR_TIMER_005_12 = 18, 838 IB_RNR_TIMER_007_68 = 19, 839 IB_RNR_TIMER_010_24 = 20, 840 IB_RNR_TIMER_015_36 = 21, 841 IB_RNR_TIMER_020_48 = 22, 842 IB_RNR_TIMER_030_72 = 23, 843 IB_RNR_TIMER_040_96 = 24, 844 IB_RNR_TIMER_061_44 = 25, 845 IB_RNR_TIMER_081_92 = 26, 846 IB_RNR_TIMER_122_88 = 27, 847 IB_RNR_TIMER_163_84 = 28, 848 IB_RNR_TIMER_245_76 = 29, 849 IB_RNR_TIMER_327_68 = 30, 850 IB_RNR_TIMER_491_52 = 31 851 }; 852 853 enum ib_qp_attr_mask { 854 IB_QP_STATE = 1, 855 IB_QP_CUR_STATE = (1<<1), 856 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 857 IB_QP_ACCESS_FLAGS = (1<<3), 858 IB_QP_PKEY_INDEX = (1<<4), 859 IB_QP_PORT = (1<<5), 860 IB_QP_QKEY = (1<<6), 861 IB_QP_AV = (1<<7), 862 IB_QP_PATH_MTU = (1<<8), 863 IB_QP_TIMEOUT = (1<<9), 864 IB_QP_RETRY_CNT = (1<<10), 865 IB_QP_RNR_RETRY = (1<<11), 866 IB_QP_RQ_PSN = (1<<12), 867 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 868 IB_QP_ALT_PATH = (1<<14), 869 IB_QP_MIN_RNR_TIMER = (1<<15), 870 IB_QP_SQ_PSN = (1<<16), 871 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 872 IB_QP_PATH_MIG_STATE = (1<<18), 873 IB_QP_CAP = (1<<19), 874 IB_QP_DEST_QPN = (1<<20), 875 IB_QP_SMAC = (1<<21), 876 IB_QP_ALT_SMAC = (1<<22), 877 IB_QP_VID = (1<<23), 878 IB_QP_ALT_VID = (1<<24), 879 }; 880 881 enum ib_qp_state { 882 IB_QPS_RESET, 883 IB_QPS_INIT, 884 IB_QPS_RTR, 885 IB_QPS_RTS, 886 IB_QPS_SQD, 887 IB_QPS_SQE, 888 IB_QPS_ERR 889 }; 890 891 enum ib_mig_state { 892 IB_MIG_MIGRATED, 893 IB_MIG_REARM, 894 IB_MIG_ARMED 895 }; 896 897 enum ib_mw_type { 898 IB_MW_TYPE_1 = 1, 899 IB_MW_TYPE_2 = 2 900 }; 901 902 struct ib_qp_attr { 903 enum ib_qp_state qp_state; 904 enum ib_qp_state cur_qp_state; 905 enum ib_mtu path_mtu; 906 enum ib_mig_state path_mig_state; 907 u32 qkey; 908 u32 rq_psn; 909 u32 sq_psn; 910 u32 dest_qp_num; 911 int qp_access_flags; 912 struct ib_qp_cap cap; 913 struct ib_ah_attr ah_attr; 914 struct ib_ah_attr alt_ah_attr; 915 u16 pkey_index; 916 u16 alt_pkey_index; 917 u8 en_sqd_async_notify; 918 u8 sq_draining; 919 u8 max_rd_atomic; 920 u8 max_dest_rd_atomic; 921 u8 min_rnr_timer; 922 u8 port_num; 923 u8 timeout; 924 u8 retry_cnt; 925 u8 rnr_retry; 926 u8 alt_port_num; 927 u8 alt_timeout; 928 u8 smac[ETH_ALEN]; 929 u8 alt_smac[ETH_ALEN]; 930 u16 vlan_id; 931 u16 alt_vlan_id; 932 }; 933 934 enum ib_wr_opcode { 935 IB_WR_RDMA_WRITE, 936 IB_WR_RDMA_WRITE_WITH_IMM, 937 IB_WR_SEND, 938 IB_WR_SEND_WITH_IMM, 939 IB_WR_RDMA_READ, 940 IB_WR_ATOMIC_CMP_AND_SWP, 941 IB_WR_ATOMIC_FETCH_AND_ADD, 942 IB_WR_LSO, 943 IB_WR_SEND_WITH_INV, 944 IB_WR_RDMA_READ_WITH_INV, 945 IB_WR_LOCAL_INV, 946 IB_WR_FAST_REG_MR, 947 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 948 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 949 IB_WR_BIND_MW, 950 IB_WR_REG_SIG_MR, 951 /* reserve values for low level drivers' internal use. 952 * These values will not be used at all in the ib core layer. 953 */ 954 IB_WR_RESERVED1 = 0xf0, 955 IB_WR_RESERVED2, 956 IB_WR_RESERVED3, 957 IB_WR_RESERVED4, 958 IB_WR_RESERVED5, 959 IB_WR_RESERVED6, 960 IB_WR_RESERVED7, 961 IB_WR_RESERVED8, 962 IB_WR_RESERVED9, 963 IB_WR_RESERVED10, 964 }; 965 966 enum ib_send_flags { 967 IB_SEND_FENCE = 1, 968 IB_SEND_SIGNALED = (1<<1), 969 IB_SEND_SOLICITED = (1<<2), 970 IB_SEND_INLINE = (1<<3), 971 IB_SEND_IP_CSUM = (1<<4), 972 973 /* reserve bits 26-31 for low level drivers' internal use */ 974 IB_SEND_RESERVED_START = (1 << 26), 975 IB_SEND_RESERVED_END = (1 << 31), 976 }; 977 978 struct ib_sge { 979 u64 addr; 980 u32 length; 981 u32 lkey; 982 }; 983 984 struct ib_fast_reg_page_list { 985 struct ib_device *device; 986 u64 *page_list; 987 unsigned int max_page_list_len; 988 }; 989 990 /** 991 * struct ib_mw_bind_info - Parameters for a memory window bind operation. 992 * @mr: A memory region to bind the memory window to. 993 * @addr: The address where the memory window should begin. 994 * @length: The length of the memory window, in bytes. 995 * @mw_access_flags: Access flags from enum ib_access_flags for the window. 996 * 997 * This struct contains the shared parameters for type 1 and type 2 998 * memory window bind operations. 999 */ 1000 struct ib_mw_bind_info { 1001 struct ib_mr *mr; 1002 u64 addr; 1003 u64 length; 1004 int mw_access_flags; 1005 }; 1006 1007 struct ib_send_wr { 1008 struct ib_send_wr *next; 1009 u64 wr_id; 1010 struct ib_sge *sg_list; 1011 int num_sge; 1012 enum ib_wr_opcode opcode; 1013 int send_flags; 1014 union { 1015 __be32 imm_data; 1016 u32 invalidate_rkey; 1017 } ex; 1018 union { 1019 struct { 1020 u64 remote_addr; 1021 u32 rkey; 1022 } rdma; 1023 struct { 1024 u64 remote_addr; 1025 u64 compare_add; 1026 u64 swap; 1027 u64 compare_add_mask; 1028 u64 swap_mask; 1029 u32 rkey; 1030 } atomic; 1031 struct { 1032 struct ib_ah *ah; 1033 void *header; 1034 int hlen; 1035 int mss; 1036 u32 remote_qpn; 1037 u32 remote_qkey; 1038 u16 pkey_index; /* valid for GSI only */ 1039 u8 port_num; /* valid for DR SMPs on switch only */ 1040 } ud; 1041 struct { 1042 u64 iova_start; 1043 struct ib_fast_reg_page_list *page_list; 1044 unsigned int page_shift; 1045 unsigned int page_list_len; 1046 u32 length; 1047 int access_flags; 1048 u32 rkey; 1049 } fast_reg; 1050 struct { 1051 struct ib_mw *mw; 1052 /* The new rkey for the memory window. */ 1053 u32 rkey; 1054 struct ib_mw_bind_info bind_info; 1055 } bind_mw; 1056 struct { 1057 struct ib_sig_attrs *sig_attrs; 1058 struct ib_mr *sig_mr; 1059 int access_flags; 1060 struct ib_sge *prot; 1061 } sig_handover; 1062 } wr; 1063 u32 xrc_remote_srq_num; /* XRC TGT QPs only */ 1064 }; 1065 1066 struct ib_recv_wr { 1067 struct ib_recv_wr *next; 1068 u64 wr_id; 1069 struct ib_sge *sg_list; 1070 int num_sge; 1071 }; 1072 1073 enum ib_access_flags { 1074 IB_ACCESS_LOCAL_WRITE = 1, 1075 IB_ACCESS_REMOTE_WRITE = (1<<1), 1076 IB_ACCESS_REMOTE_READ = (1<<2), 1077 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 1078 IB_ACCESS_MW_BIND = (1<<4), 1079 IB_ZERO_BASED = (1<<5) 1080 }; 1081 1082 struct ib_phys_buf { 1083 u64 addr; 1084 u64 size; 1085 }; 1086 1087 struct ib_mr_attr { 1088 struct ib_pd *pd; 1089 u64 device_virt_addr; 1090 u64 size; 1091 int mr_access_flags; 1092 u32 lkey; 1093 u32 rkey; 1094 }; 1095 1096 enum ib_mr_rereg_flags { 1097 IB_MR_REREG_TRANS = 1, 1098 IB_MR_REREG_PD = (1<<1), 1099 IB_MR_REREG_ACCESS = (1<<2) 1100 }; 1101 1102 /** 1103 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation. 1104 * @wr_id: Work request id. 1105 * @send_flags: Flags from ib_send_flags enum. 1106 * @bind_info: More parameters of the bind operation. 1107 */ 1108 struct ib_mw_bind { 1109 u64 wr_id; 1110 int send_flags; 1111 struct ib_mw_bind_info bind_info; 1112 }; 1113 1114 struct ib_fmr_attr { 1115 int max_pages; 1116 int max_maps; 1117 u8 page_shift; 1118 }; 1119 1120 struct ib_ucontext { 1121 struct ib_device *device; 1122 struct list_head pd_list; 1123 struct list_head mr_list; 1124 struct list_head mw_list; 1125 struct list_head cq_list; 1126 struct list_head qp_list; 1127 struct list_head srq_list; 1128 struct list_head ah_list; 1129 struct list_head xrcd_list; 1130 struct list_head rule_list; 1131 int closing; 1132 }; 1133 1134 struct ib_uobject { 1135 u64 user_handle; /* handle given to us by userspace */ 1136 struct ib_ucontext *context; /* associated user context */ 1137 void *object; /* containing object */ 1138 struct list_head list; /* link to context's list */ 1139 int id; /* index into kernel idr */ 1140 struct kref ref; 1141 struct rw_semaphore mutex; /* protects .live */ 1142 int live; 1143 }; 1144 1145 struct ib_udata { 1146 const void __user *inbuf; 1147 void __user *outbuf; 1148 size_t inlen; 1149 size_t outlen; 1150 }; 1151 1152 struct ib_pd { 1153 struct ib_device *device; 1154 struct ib_uobject *uobject; 1155 atomic_t usecnt; /* count all resources */ 1156 }; 1157 1158 struct ib_xrcd { 1159 struct ib_device *device; 1160 atomic_t usecnt; /* count all exposed resources */ 1161 struct inode *inode; 1162 1163 struct mutex tgt_qp_mutex; 1164 struct list_head tgt_qp_list; 1165 }; 1166 1167 struct ib_ah { 1168 struct ib_device *device; 1169 struct ib_pd *pd; 1170 struct ib_uobject *uobject; 1171 }; 1172 1173 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1174 1175 struct ib_cq { 1176 struct ib_device *device; 1177 struct ib_uobject *uobject; 1178 ib_comp_handler comp_handler; 1179 void (*event_handler)(struct ib_event *, void *); 1180 void *cq_context; 1181 int cqe; 1182 atomic_t usecnt; /* count number of work queues */ 1183 }; 1184 1185 struct ib_srq { 1186 struct ib_device *device; 1187 struct ib_pd *pd; 1188 struct ib_uobject *uobject; 1189 void (*event_handler)(struct ib_event *, void *); 1190 void *srq_context; 1191 enum ib_srq_type srq_type; 1192 atomic_t usecnt; 1193 1194 union { 1195 struct { 1196 struct ib_xrcd *xrcd; 1197 struct ib_cq *cq; 1198 u32 srq_num; 1199 } xrc; 1200 } ext; 1201 }; 1202 1203 struct ib_qp { 1204 struct ib_device *device; 1205 struct ib_pd *pd; 1206 struct ib_cq *send_cq; 1207 struct ib_cq *recv_cq; 1208 struct ib_srq *srq; 1209 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1210 struct list_head xrcd_list; 1211 /* count times opened, mcast attaches, flow attaches */ 1212 atomic_t usecnt; 1213 struct list_head open_list; 1214 struct ib_qp *real_qp; 1215 struct ib_uobject *uobject; 1216 void (*event_handler)(struct ib_event *, void *); 1217 void *qp_context; 1218 u32 qp_num; 1219 enum ib_qp_type qp_type; 1220 }; 1221 1222 struct ib_mr { 1223 struct ib_device *device; 1224 struct ib_pd *pd; 1225 struct ib_uobject *uobject; 1226 u32 lkey; 1227 u32 rkey; 1228 atomic_t usecnt; /* count number of MWs */ 1229 }; 1230 1231 struct ib_mw { 1232 struct ib_device *device; 1233 struct ib_pd *pd; 1234 struct ib_uobject *uobject; 1235 u32 rkey; 1236 enum ib_mw_type type; 1237 }; 1238 1239 struct ib_fmr { 1240 struct ib_device *device; 1241 struct ib_pd *pd; 1242 struct list_head list; 1243 u32 lkey; 1244 u32 rkey; 1245 }; 1246 1247 /* Supported steering options */ 1248 enum ib_flow_attr_type { 1249 /* steering according to rule specifications */ 1250 IB_FLOW_ATTR_NORMAL = 0x0, 1251 /* default unicast and multicast rule - 1252 * receive all Eth traffic which isn't steered to any QP 1253 */ 1254 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1255 /* default multicast rule - 1256 * receive all Eth multicast traffic which isn't steered to any QP 1257 */ 1258 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1259 /* sniffer rule - receive all port traffic */ 1260 IB_FLOW_ATTR_SNIFFER = 0x3 1261 }; 1262 1263 /* Supported steering header types */ 1264 enum ib_flow_spec_type { 1265 /* L2 headers*/ 1266 IB_FLOW_SPEC_ETH = 0x20, 1267 IB_FLOW_SPEC_IB = 0x22, 1268 /* L3 header*/ 1269 IB_FLOW_SPEC_IPV4 = 0x30, 1270 /* L4 headers*/ 1271 IB_FLOW_SPEC_TCP = 0x40, 1272 IB_FLOW_SPEC_UDP = 0x41 1273 }; 1274 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1275 #define IB_FLOW_SPEC_SUPPORT_LAYERS 4 1276 1277 /* Flow steering rule priority is set according to it's domain. 1278 * Lower domain value means higher priority. 1279 */ 1280 enum ib_flow_domain { 1281 IB_FLOW_DOMAIN_USER, 1282 IB_FLOW_DOMAIN_ETHTOOL, 1283 IB_FLOW_DOMAIN_RFS, 1284 IB_FLOW_DOMAIN_NIC, 1285 IB_FLOW_DOMAIN_NUM /* Must be last */ 1286 }; 1287 1288 struct ib_flow_eth_filter { 1289 u8 dst_mac[6]; 1290 u8 src_mac[6]; 1291 __be16 ether_type; 1292 __be16 vlan_tag; 1293 }; 1294 1295 struct ib_flow_spec_eth { 1296 enum ib_flow_spec_type type; 1297 u16 size; 1298 struct ib_flow_eth_filter val; 1299 struct ib_flow_eth_filter mask; 1300 }; 1301 1302 struct ib_flow_ib_filter { 1303 __be16 dlid; 1304 __u8 sl; 1305 }; 1306 1307 struct ib_flow_spec_ib { 1308 enum ib_flow_spec_type type; 1309 u16 size; 1310 struct ib_flow_ib_filter val; 1311 struct ib_flow_ib_filter mask; 1312 }; 1313 1314 struct ib_flow_ipv4_filter { 1315 __be32 src_ip; 1316 __be32 dst_ip; 1317 }; 1318 1319 struct ib_flow_spec_ipv4 { 1320 enum ib_flow_spec_type type; 1321 u16 size; 1322 struct ib_flow_ipv4_filter val; 1323 struct ib_flow_ipv4_filter mask; 1324 }; 1325 1326 struct ib_flow_tcp_udp_filter { 1327 __be16 dst_port; 1328 __be16 src_port; 1329 }; 1330 1331 struct ib_flow_spec_tcp_udp { 1332 enum ib_flow_spec_type type; 1333 u16 size; 1334 struct ib_flow_tcp_udp_filter val; 1335 struct ib_flow_tcp_udp_filter mask; 1336 }; 1337 1338 union ib_flow_spec { 1339 struct { 1340 enum ib_flow_spec_type type; 1341 u16 size; 1342 }; 1343 struct ib_flow_spec_eth eth; 1344 struct ib_flow_spec_ib ib; 1345 struct ib_flow_spec_ipv4 ipv4; 1346 struct ib_flow_spec_tcp_udp tcp_udp; 1347 }; 1348 1349 struct ib_flow_attr { 1350 enum ib_flow_attr_type type; 1351 u16 size; 1352 u16 priority; 1353 u32 flags; 1354 u8 num_of_specs; 1355 u8 port; 1356 /* Following are the optional layers according to user request 1357 * struct ib_flow_spec_xxx 1358 * struct ib_flow_spec_yyy 1359 */ 1360 }; 1361 1362 struct ib_flow { 1363 struct ib_qp *qp; 1364 struct ib_uobject *uobject; 1365 }; 1366 1367 struct ib_mad; 1368 struct ib_grh; 1369 1370 enum ib_process_mad_flags { 1371 IB_MAD_IGNORE_MKEY = 1, 1372 IB_MAD_IGNORE_BKEY = 2, 1373 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 1374 }; 1375 1376 enum ib_mad_result { 1377 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 1378 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 1379 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 1380 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 1381 }; 1382 1383 #define IB_DEVICE_NAME_MAX 64 1384 1385 struct ib_cache { 1386 rwlock_t lock; 1387 struct ib_event_handler event_handler; 1388 struct ib_pkey_cache **pkey_cache; 1389 struct ib_gid_cache **gid_cache; 1390 u8 *lmc_cache; 1391 }; 1392 1393 struct ib_dma_mapping_ops { 1394 int (*mapping_error)(struct ib_device *dev, 1395 u64 dma_addr); 1396 u64 (*map_single)(struct ib_device *dev, 1397 void *ptr, size_t size, 1398 enum dma_data_direction direction); 1399 void (*unmap_single)(struct ib_device *dev, 1400 u64 addr, size_t size, 1401 enum dma_data_direction direction); 1402 u64 (*map_page)(struct ib_device *dev, 1403 struct page *page, unsigned long offset, 1404 size_t size, 1405 enum dma_data_direction direction); 1406 void (*unmap_page)(struct ib_device *dev, 1407 u64 addr, size_t size, 1408 enum dma_data_direction direction); 1409 int (*map_sg)(struct ib_device *dev, 1410 struct scatterlist *sg, int nents, 1411 enum dma_data_direction direction); 1412 void (*unmap_sg)(struct ib_device *dev, 1413 struct scatterlist *sg, int nents, 1414 enum dma_data_direction direction); 1415 void (*sync_single_for_cpu)(struct ib_device *dev, 1416 u64 dma_handle, 1417 size_t size, 1418 enum dma_data_direction dir); 1419 void (*sync_single_for_device)(struct ib_device *dev, 1420 u64 dma_handle, 1421 size_t size, 1422 enum dma_data_direction dir); 1423 void *(*alloc_coherent)(struct ib_device *dev, 1424 size_t size, 1425 u64 *dma_handle, 1426 gfp_t flag); 1427 void (*free_coherent)(struct ib_device *dev, 1428 size_t size, void *cpu_addr, 1429 u64 dma_handle); 1430 }; 1431 1432 struct iw_cm_verbs; 1433 1434 struct ib_device { 1435 struct device *dma_device; 1436 1437 char name[IB_DEVICE_NAME_MAX]; 1438 1439 struct list_head event_handler_list; 1440 spinlock_t event_handler_lock; 1441 1442 spinlock_t client_data_lock; 1443 struct list_head core_list; 1444 struct list_head client_data_list; 1445 1446 struct ib_cache cache; 1447 int *pkey_tbl_len; 1448 int *gid_tbl_len; 1449 1450 int num_comp_vectors; 1451 1452 struct iw_cm_verbs *iwcm; 1453 1454 int (*get_protocol_stats)(struct ib_device *device, 1455 union rdma_protocol_stats *stats); 1456 int (*query_device)(struct ib_device *device, 1457 struct ib_device_attr *device_attr); 1458 int (*query_port)(struct ib_device *device, 1459 u8 port_num, 1460 struct ib_port_attr *port_attr); 1461 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 1462 u8 port_num); 1463 int (*query_gid)(struct ib_device *device, 1464 u8 port_num, int index, 1465 union ib_gid *gid); 1466 int (*query_pkey)(struct ib_device *device, 1467 u8 port_num, u16 index, u16 *pkey); 1468 int (*modify_device)(struct ib_device *device, 1469 int device_modify_mask, 1470 struct ib_device_modify *device_modify); 1471 int (*modify_port)(struct ib_device *device, 1472 u8 port_num, int port_modify_mask, 1473 struct ib_port_modify *port_modify); 1474 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 1475 struct ib_udata *udata); 1476 int (*dealloc_ucontext)(struct ib_ucontext *context); 1477 int (*mmap)(struct ib_ucontext *context, 1478 struct vm_area_struct *vma); 1479 struct ib_pd * (*alloc_pd)(struct ib_device *device, 1480 struct ib_ucontext *context, 1481 struct ib_udata *udata); 1482 int (*dealloc_pd)(struct ib_pd *pd); 1483 struct ib_ah * (*create_ah)(struct ib_pd *pd, 1484 struct ib_ah_attr *ah_attr); 1485 int (*modify_ah)(struct ib_ah *ah, 1486 struct ib_ah_attr *ah_attr); 1487 int (*query_ah)(struct ib_ah *ah, 1488 struct ib_ah_attr *ah_attr); 1489 int (*destroy_ah)(struct ib_ah *ah); 1490 struct ib_srq * (*create_srq)(struct ib_pd *pd, 1491 struct ib_srq_init_attr *srq_init_attr, 1492 struct ib_udata *udata); 1493 int (*modify_srq)(struct ib_srq *srq, 1494 struct ib_srq_attr *srq_attr, 1495 enum ib_srq_attr_mask srq_attr_mask, 1496 struct ib_udata *udata); 1497 int (*query_srq)(struct ib_srq *srq, 1498 struct ib_srq_attr *srq_attr); 1499 int (*destroy_srq)(struct ib_srq *srq); 1500 int (*post_srq_recv)(struct ib_srq *srq, 1501 struct ib_recv_wr *recv_wr, 1502 struct ib_recv_wr **bad_recv_wr); 1503 struct ib_qp * (*create_qp)(struct ib_pd *pd, 1504 struct ib_qp_init_attr *qp_init_attr, 1505 struct ib_udata *udata); 1506 int (*modify_qp)(struct ib_qp *qp, 1507 struct ib_qp_attr *qp_attr, 1508 int qp_attr_mask, 1509 struct ib_udata *udata); 1510 int (*query_qp)(struct ib_qp *qp, 1511 struct ib_qp_attr *qp_attr, 1512 int qp_attr_mask, 1513 struct ib_qp_init_attr *qp_init_attr); 1514 int (*destroy_qp)(struct ib_qp *qp); 1515 int (*post_send)(struct ib_qp *qp, 1516 struct ib_send_wr *send_wr, 1517 struct ib_send_wr **bad_send_wr); 1518 int (*post_recv)(struct ib_qp *qp, 1519 struct ib_recv_wr *recv_wr, 1520 struct ib_recv_wr **bad_recv_wr); 1521 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, 1522 int comp_vector, 1523 struct ib_ucontext *context, 1524 struct ib_udata *udata); 1525 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 1526 u16 cq_period); 1527 int (*destroy_cq)(struct ib_cq *cq); 1528 int (*resize_cq)(struct ib_cq *cq, int cqe, 1529 struct ib_udata *udata); 1530 int (*poll_cq)(struct ib_cq *cq, int num_entries, 1531 struct ib_wc *wc); 1532 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 1533 int (*req_notify_cq)(struct ib_cq *cq, 1534 enum ib_cq_notify_flags flags); 1535 int (*req_ncomp_notif)(struct ib_cq *cq, 1536 int wc_cnt); 1537 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 1538 int mr_access_flags); 1539 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd, 1540 struct ib_phys_buf *phys_buf_array, 1541 int num_phys_buf, 1542 int mr_access_flags, 1543 u64 *iova_start); 1544 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 1545 u64 start, u64 length, 1546 u64 virt_addr, 1547 int mr_access_flags, 1548 struct ib_udata *udata); 1549 int (*query_mr)(struct ib_mr *mr, 1550 struct ib_mr_attr *mr_attr); 1551 int (*dereg_mr)(struct ib_mr *mr); 1552 int (*destroy_mr)(struct ib_mr *mr); 1553 struct ib_mr * (*create_mr)(struct ib_pd *pd, 1554 struct ib_mr_init_attr *mr_init_attr); 1555 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd, 1556 int max_page_list_len); 1557 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device, 1558 int page_list_len); 1559 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list); 1560 int (*rereg_phys_mr)(struct ib_mr *mr, 1561 int mr_rereg_mask, 1562 struct ib_pd *pd, 1563 struct ib_phys_buf *phys_buf_array, 1564 int num_phys_buf, 1565 int mr_access_flags, 1566 u64 *iova_start); 1567 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 1568 enum ib_mw_type type); 1569 int (*bind_mw)(struct ib_qp *qp, 1570 struct ib_mw *mw, 1571 struct ib_mw_bind *mw_bind); 1572 int (*dealloc_mw)(struct ib_mw *mw); 1573 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 1574 int mr_access_flags, 1575 struct ib_fmr_attr *fmr_attr); 1576 int (*map_phys_fmr)(struct ib_fmr *fmr, 1577 u64 *page_list, int list_len, 1578 u64 iova); 1579 int (*unmap_fmr)(struct list_head *fmr_list); 1580 int (*dealloc_fmr)(struct ib_fmr *fmr); 1581 int (*attach_mcast)(struct ib_qp *qp, 1582 union ib_gid *gid, 1583 u16 lid); 1584 int (*detach_mcast)(struct ib_qp *qp, 1585 union ib_gid *gid, 1586 u16 lid); 1587 int (*process_mad)(struct ib_device *device, 1588 int process_mad_flags, 1589 u8 port_num, 1590 struct ib_wc *in_wc, 1591 struct ib_grh *in_grh, 1592 struct ib_mad *in_mad, 1593 struct ib_mad *out_mad); 1594 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 1595 struct ib_ucontext *ucontext, 1596 struct ib_udata *udata); 1597 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 1598 struct ib_flow * (*create_flow)(struct ib_qp *qp, 1599 struct ib_flow_attr 1600 *flow_attr, 1601 int domain); 1602 int (*destroy_flow)(struct ib_flow *flow_id); 1603 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 1604 struct ib_mr_status *mr_status); 1605 1606 struct ib_dma_mapping_ops *dma_ops; 1607 1608 struct module *owner; 1609 struct device dev; 1610 struct kobject *ports_parent; 1611 struct list_head port_list; 1612 1613 enum { 1614 IB_DEV_UNINITIALIZED, 1615 IB_DEV_REGISTERED, 1616 IB_DEV_UNREGISTERED 1617 } reg_state; 1618 1619 int uverbs_abi_ver; 1620 u64 uverbs_cmd_mask; 1621 u64 uverbs_ex_cmd_mask; 1622 1623 char node_desc[64]; 1624 __be64 node_guid; 1625 u32 local_dma_lkey; 1626 u8 node_type; 1627 u8 phys_port_cnt; 1628 }; 1629 1630 struct ib_client { 1631 char *name; 1632 void (*add) (struct ib_device *); 1633 void (*remove)(struct ib_device *); 1634 1635 struct list_head list; 1636 }; 1637 1638 struct ib_device *ib_alloc_device(size_t size); 1639 void ib_dealloc_device(struct ib_device *device); 1640 1641 int ib_register_device(struct ib_device *device, 1642 int (*port_callback)(struct ib_device *, 1643 u8, struct kobject *)); 1644 void ib_unregister_device(struct ib_device *device); 1645 1646 int ib_register_client (struct ib_client *client); 1647 void ib_unregister_client(struct ib_client *client); 1648 1649 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 1650 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 1651 void *data); 1652 1653 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 1654 { 1655 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 1656 } 1657 1658 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 1659 { 1660 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 1661 } 1662 1663 /** 1664 * ib_modify_qp_is_ok - Check that the supplied attribute mask 1665 * contains all required attributes and no attributes not allowed for 1666 * the given QP state transition. 1667 * @cur_state: Current QP state 1668 * @next_state: Next QP state 1669 * @type: QP type 1670 * @mask: Mask of supplied QP attributes 1671 * @ll : link layer of port 1672 * 1673 * This function is a helper function that a low-level driver's 1674 * modify_qp method can use to validate the consumer's input. It 1675 * checks that cur_state and next_state are valid QP states, that a 1676 * transition from cur_state to next_state is allowed by the IB spec, 1677 * and that the attribute mask supplied is allowed for the transition. 1678 */ 1679 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1680 enum ib_qp_type type, enum ib_qp_attr_mask mask, 1681 enum rdma_link_layer ll); 1682 1683 int ib_register_event_handler (struct ib_event_handler *event_handler); 1684 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 1685 void ib_dispatch_event(struct ib_event *event); 1686 1687 int ib_query_device(struct ib_device *device, 1688 struct ib_device_attr *device_attr); 1689 1690 int ib_query_port(struct ib_device *device, 1691 u8 port_num, struct ib_port_attr *port_attr); 1692 1693 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 1694 u8 port_num); 1695 1696 int ib_query_gid(struct ib_device *device, 1697 u8 port_num, int index, union ib_gid *gid); 1698 1699 int ib_query_pkey(struct ib_device *device, 1700 u8 port_num, u16 index, u16 *pkey); 1701 1702 int ib_modify_device(struct ib_device *device, 1703 int device_modify_mask, 1704 struct ib_device_modify *device_modify); 1705 1706 int ib_modify_port(struct ib_device *device, 1707 u8 port_num, int port_modify_mask, 1708 struct ib_port_modify *port_modify); 1709 1710 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 1711 u8 *port_num, u16 *index); 1712 1713 int ib_find_pkey(struct ib_device *device, 1714 u8 port_num, u16 pkey, u16 *index); 1715 1716 /** 1717 * ib_alloc_pd - Allocates an unused protection domain. 1718 * @device: The device on which to allocate the protection domain. 1719 * 1720 * A protection domain object provides an association between QPs, shared 1721 * receive queues, address handles, memory regions, and memory windows. 1722 */ 1723 struct ib_pd *ib_alloc_pd(struct ib_device *device); 1724 1725 /** 1726 * ib_dealloc_pd - Deallocates a protection domain. 1727 * @pd: The protection domain to deallocate. 1728 */ 1729 int ib_dealloc_pd(struct ib_pd *pd); 1730 1731 /** 1732 * ib_create_ah - Creates an address handle for the given address vector. 1733 * @pd: The protection domain associated with the address handle. 1734 * @ah_attr: The attributes of the address vector. 1735 * 1736 * The address handle is used to reference a local or global destination 1737 * in all UD QP post sends. 1738 */ 1739 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 1740 1741 /** 1742 * ib_init_ah_from_wc - Initializes address handle attributes from a 1743 * work completion. 1744 * @device: Device on which the received message arrived. 1745 * @port_num: Port on which the received message arrived. 1746 * @wc: Work completion associated with the received message. 1747 * @grh: References the received global route header. This parameter is 1748 * ignored unless the work completion indicates that the GRH is valid. 1749 * @ah_attr: Returned attributes that can be used when creating an address 1750 * handle for replying to the message. 1751 */ 1752 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, 1753 struct ib_grh *grh, struct ib_ah_attr *ah_attr); 1754 1755 /** 1756 * ib_create_ah_from_wc - Creates an address handle associated with the 1757 * sender of the specified work completion. 1758 * @pd: The protection domain associated with the address handle. 1759 * @wc: Work completion information associated with a received message. 1760 * @grh: References the received global route header. This parameter is 1761 * ignored unless the work completion indicates that the GRH is valid. 1762 * @port_num: The outbound port number to associate with the address. 1763 * 1764 * The address handle is used to reference a local or global destination 1765 * in all UD QP post sends. 1766 */ 1767 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, 1768 struct ib_grh *grh, u8 port_num); 1769 1770 /** 1771 * ib_modify_ah - Modifies the address vector associated with an address 1772 * handle. 1773 * @ah: The address handle to modify. 1774 * @ah_attr: The new address vector attributes to associate with the 1775 * address handle. 1776 */ 1777 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 1778 1779 /** 1780 * ib_query_ah - Queries the address vector associated with an address 1781 * handle. 1782 * @ah: The address handle to query. 1783 * @ah_attr: The address vector attributes associated with the address 1784 * handle. 1785 */ 1786 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 1787 1788 /** 1789 * ib_destroy_ah - Destroys an address handle. 1790 * @ah: The address handle to destroy. 1791 */ 1792 int ib_destroy_ah(struct ib_ah *ah); 1793 1794 /** 1795 * ib_create_srq - Creates a SRQ associated with the specified protection 1796 * domain. 1797 * @pd: The protection domain associated with the SRQ. 1798 * @srq_init_attr: A list of initial attributes required to create the 1799 * SRQ. If SRQ creation succeeds, then the attributes are updated to 1800 * the actual capabilities of the created SRQ. 1801 * 1802 * srq_attr->max_wr and srq_attr->max_sge are read the determine the 1803 * requested size of the SRQ, and set to the actual values allocated 1804 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 1805 * will always be at least as large as the requested values. 1806 */ 1807 struct ib_srq *ib_create_srq(struct ib_pd *pd, 1808 struct ib_srq_init_attr *srq_init_attr); 1809 1810 /** 1811 * ib_modify_srq - Modifies the attributes for the specified SRQ. 1812 * @srq: The SRQ to modify. 1813 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 1814 * the current values of selected SRQ attributes are returned. 1815 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 1816 * are being modified. 1817 * 1818 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 1819 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 1820 * the number of receives queued drops below the limit. 1821 */ 1822 int ib_modify_srq(struct ib_srq *srq, 1823 struct ib_srq_attr *srq_attr, 1824 enum ib_srq_attr_mask srq_attr_mask); 1825 1826 /** 1827 * ib_query_srq - Returns the attribute list and current values for the 1828 * specified SRQ. 1829 * @srq: The SRQ to query. 1830 * @srq_attr: The attributes of the specified SRQ. 1831 */ 1832 int ib_query_srq(struct ib_srq *srq, 1833 struct ib_srq_attr *srq_attr); 1834 1835 /** 1836 * ib_destroy_srq - Destroys the specified SRQ. 1837 * @srq: The SRQ to destroy. 1838 */ 1839 int ib_destroy_srq(struct ib_srq *srq); 1840 1841 /** 1842 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 1843 * @srq: The SRQ to post the work request on. 1844 * @recv_wr: A list of work requests to post on the receive queue. 1845 * @bad_recv_wr: On an immediate failure, this parameter will reference 1846 * the work request that failed to be posted on the QP. 1847 */ 1848 static inline int ib_post_srq_recv(struct ib_srq *srq, 1849 struct ib_recv_wr *recv_wr, 1850 struct ib_recv_wr **bad_recv_wr) 1851 { 1852 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 1853 } 1854 1855 /** 1856 * ib_create_qp - Creates a QP associated with the specified protection 1857 * domain. 1858 * @pd: The protection domain associated with the QP. 1859 * @qp_init_attr: A list of initial attributes required to create the 1860 * QP. If QP creation succeeds, then the attributes are updated to 1861 * the actual capabilities of the created QP. 1862 */ 1863 struct ib_qp *ib_create_qp(struct ib_pd *pd, 1864 struct ib_qp_init_attr *qp_init_attr); 1865 1866 /** 1867 * ib_modify_qp - Modifies the attributes for the specified QP and then 1868 * transitions the QP to the given state. 1869 * @qp: The QP to modify. 1870 * @qp_attr: On input, specifies the QP attributes to modify. On output, 1871 * the current values of selected QP attributes are returned. 1872 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 1873 * are being modified. 1874 */ 1875 int ib_modify_qp(struct ib_qp *qp, 1876 struct ib_qp_attr *qp_attr, 1877 int qp_attr_mask); 1878 1879 /** 1880 * ib_query_qp - Returns the attribute list and current values for the 1881 * specified QP. 1882 * @qp: The QP to query. 1883 * @qp_attr: The attributes of the specified QP. 1884 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 1885 * @qp_init_attr: Additional attributes of the selected QP. 1886 * 1887 * The qp_attr_mask may be used to limit the query to gathering only the 1888 * selected attributes. 1889 */ 1890 int ib_query_qp(struct ib_qp *qp, 1891 struct ib_qp_attr *qp_attr, 1892 int qp_attr_mask, 1893 struct ib_qp_init_attr *qp_init_attr); 1894 1895 /** 1896 * ib_destroy_qp - Destroys the specified QP. 1897 * @qp: The QP to destroy. 1898 */ 1899 int ib_destroy_qp(struct ib_qp *qp); 1900 1901 /** 1902 * ib_open_qp - Obtain a reference to an existing sharable QP. 1903 * @xrcd - XRC domain 1904 * @qp_open_attr: Attributes identifying the QP to open. 1905 * 1906 * Returns a reference to a sharable QP. 1907 */ 1908 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 1909 struct ib_qp_open_attr *qp_open_attr); 1910 1911 /** 1912 * ib_close_qp - Release an external reference to a QP. 1913 * @qp: The QP handle to release 1914 * 1915 * The opened QP handle is released by the caller. The underlying 1916 * shared QP is not destroyed until all internal references are released. 1917 */ 1918 int ib_close_qp(struct ib_qp *qp); 1919 1920 /** 1921 * ib_post_send - Posts a list of work requests to the send queue of 1922 * the specified QP. 1923 * @qp: The QP to post the work request on. 1924 * @send_wr: A list of work requests to post on the send queue. 1925 * @bad_send_wr: On an immediate failure, this parameter will reference 1926 * the work request that failed to be posted on the QP. 1927 * 1928 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 1929 * error is returned, the QP state shall not be affected, 1930 * ib_post_send() will return an immediate error after queueing any 1931 * earlier work requests in the list. 1932 */ 1933 static inline int ib_post_send(struct ib_qp *qp, 1934 struct ib_send_wr *send_wr, 1935 struct ib_send_wr **bad_send_wr) 1936 { 1937 return qp->device->post_send(qp, send_wr, bad_send_wr); 1938 } 1939 1940 /** 1941 * ib_post_recv - Posts a list of work requests to the receive queue of 1942 * the specified QP. 1943 * @qp: The QP to post the work request on. 1944 * @recv_wr: A list of work requests to post on the receive queue. 1945 * @bad_recv_wr: On an immediate failure, this parameter will reference 1946 * the work request that failed to be posted on the QP. 1947 */ 1948 static inline int ib_post_recv(struct ib_qp *qp, 1949 struct ib_recv_wr *recv_wr, 1950 struct ib_recv_wr **bad_recv_wr) 1951 { 1952 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 1953 } 1954 1955 /** 1956 * ib_create_cq - Creates a CQ on the specified device. 1957 * @device: The device on which to create the CQ. 1958 * @comp_handler: A user-specified callback that is invoked when a 1959 * completion event occurs on the CQ. 1960 * @event_handler: A user-specified callback that is invoked when an 1961 * asynchronous event not associated with a completion occurs on the CQ. 1962 * @cq_context: Context associated with the CQ returned to the user via 1963 * the associated completion and event handlers. 1964 * @cqe: The minimum size of the CQ. 1965 * @comp_vector - Completion vector used to signal completion events. 1966 * Must be >= 0 and < context->num_comp_vectors. 1967 * 1968 * Users can examine the cq structure to determine the actual CQ size. 1969 */ 1970 struct ib_cq *ib_create_cq(struct ib_device *device, 1971 ib_comp_handler comp_handler, 1972 void (*event_handler)(struct ib_event *, void *), 1973 void *cq_context, int cqe, int comp_vector); 1974 1975 /** 1976 * ib_resize_cq - Modifies the capacity of the CQ. 1977 * @cq: The CQ to resize. 1978 * @cqe: The minimum size of the CQ. 1979 * 1980 * Users can examine the cq structure to determine the actual CQ size. 1981 */ 1982 int ib_resize_cq(struct ib_cq *cq, int cqe); 1983 1984 /** 1985 * ib_modify_cq - Modifies moderation params of the CQ 1986 * @cq: The CQ to modify. 1987 * @cq_count: number of CQEs that will trigger an event 1988 * @cq_period: max period of time in usec before triggering an event 1989 * 1990 */ 1991 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 1992 1993 /** 1994 * ib_destroy_cq - Destroys the specified CQ. 1995 * @cq: The CQ to destroy. 1996 */ 1997 int ib_destroy_cq(struct ib_cq *cq); 1998 1999 /** 2000 * ib_poll_cq - poll a CQ for completion(s) 2001 * @cq:the CQ being polled 2002 * @num_entries:maximum number of completions to return 2003 * @wc:array of at least @num_entries &struct ib_wc where completions 2004 * will be returned 2005 * 2006 * Poll a CQ for (possibly multiple) completions. If the return value 2007 * is < 0, an error occurred. If the return value is >= 0, it is the 2008 * number of completions returned. If the return value is 2009 * non-negative and < num_entries, then the CQ was emptied. 2010 */ 2011 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 2012 struct ib_wc *wc) 2013 { 2014 return cq->device->poll_cq(cq, num_entries, wc); 2015 } 2016 2017 /** 2018 * ib_peek_cq - Returns the number of unreaped completions currently 2019 * on the specified CQ. 2020 * @cq: The CQ to peek. 2021 * @wc_cnt: A minimum number of unreaped completions to check for. 2022 * 2023 * If the number of unreaped completions is greater than or equal to wc_cnt, 2024 * this function returns wc_cnt, otherwise, it returns the actual number of 2025 * unreaped completions. 2026 */ 2027 int ib_peek_cq(struct ib_cq *cq, int wc_cnt); 2028 2029 /** 2030 * ib_req_notify_cq - Request completion notification on a CQ. 2031 * @cq: The CQ to generate an event for. 2032 * @flags: 2033 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 2034 * to request an event on the next solicited event or next work 2035 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 2036 * may also be |ed in to request a hint about missed events, as 2037 * described below. 2038 * 2039 * Return Value: 2040 * < 0 means an error occurred while requesting notification 2041 * == 0 means notification was requested successfully, and if 2042 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 2043 * were missed and it is safe to wait for another event. In 2044 * this case is it guaranteed that any work completions added 2045 * to the CQ since the last CQ poll will trigger a completion 2046 * notification event. 2047 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 2048 * in. It means that the consumer must poll the CQ again to 2049 * make sure it is empty to avoid missing an event because of a 2050 * race between requesting notification and an entry being 2051 * added to the CQ. This return value means it is possible 2052 * (but not guaranteed) that a work completion has been added 2053 * to the CQ since the last poll without triggering a 2054 * completion notification event. 2055 */ 2056 static inline int ib_req_notify_cq(struct ib_cq *cq, 2057 enum ib_cq_notify_flags flags) 2058 { 2059 return cq->device->req_notify_cq(cq, flags); 2060 } 2061 2062 /** 2063 * ib_req_ncomp_notif - Request completion notification when there are 2064 * at least the specified number of unreaped completions on the CQ. 2065 * @cq: The CQ to generate an event for. 2066 * @wc_cnt: The number of unreaped completions that should be on the 2067 * CQ before an event is generated. 2068 */ 2069 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 2070 { 2071 return cq->device->req_ncomp_notif ? 2072 cq->device->req_ncomp_notif(cq, wc_cnt) : 2073 -ENOSYS; 2074 } 2075 2076 /** 2077 * ib_get_dma_mr - Returns a memory region for system memory that is 2078 * usable for DMA. 2079 * @pd: The protection domain associated with the memory region. 2080 * @mr_access_flags: Specifies the memory access rights. 2081 * 2082 * Note that the ib_dma_*() functions defined below must be used 2083 * to create/destroy addresses used with the Lkey or Rkey returned 2084 * by ib_get_dma_mr(). 2085 */ 2086 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 2087 2088 /** 2089 * ib_dma_mapping_error - check a DMA addr for error 2090 * @dev: The device for which the dma_addr was created 2091 * @dma_addr: The DMA address to check 2092 */ 2093 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 2094 { 2095 if (dev->dma_ops) 2096 return dev->dma_ops->mapping_error(dev, dma_addr); 2097 return dma_mapping_error(dev->dma_device, dma_addr); 2098 } 2099 2100 /** 2101 * ib_dma_map_single - Map a kernel virtual address to DMA address 2102 * @dev: The device for which the dma_addr is to be created 2103 * @cpu_addr: The kernel virtual address 2104 * @size: The size of the region in bytes 2105 * @direction: The direction of the DMA 2106 */ 2107 static inline u64 ib_dma_map_single(struct ib_device *dev, 2108 void *cpu_addr, size_t size, 2109 enum dma_data_direction direction) 2110 { 2111 if (dev->dma_ops) 2112 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 2113 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 2114 } 2115 2116 /** 2117 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 2118 * @dev: The device for which the DMA address was created 2119 * @addr: The DMA address 2120 * @size: The size of the region in bytes 2121 * @direction: The direction of the DMA 2122 */ 2123 static inline void ib_dma_unmap_single(struct ib_device *dev, 2124 u64 addr, size_t size, 2125 enum dma_data_direction direction) 2126 { 2127 if (dev->dma_ops) 2128 dev->dma_ops->unmap_single(dev, addr, size, direction); 2129 else 2130 dma_unmap_single(dev->dma_device, addr, size, direction); 2131 } 2132 2133 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 2134 void *cpu_addr, size_t size, 2135 enum dma_data_direction direction, 2136 struct dma_attrs *attrs) 2137 { 2138 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 2139 direction, attrs); 2140 } 2141 2142 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 2143 u64 addr, size_t size, 2144 enum dma_data_direction direction, 2145 struct dma_attrs *attrs) 2146 { 2147 return dma_unmap_single_attrs(dev->dma_device, addr, size, 2148 direction, attrs); 2149 } 2150 2151 /** 2152 * ib_dma_map_page - Map a physical page to DMA address 2153 * @dev: The device for which the dma_addr is to be created 2154 * @page: The page to be mapped 2155 * @offset: The offset within the page 2156 * @size: The size of the region in bytes 2157 * @direction: The direction of the DMA 2158 */ 2159 static inline u64 ib_dma_map_page(struct ib_device *dev, 2160 struct page *page, 2161 unsigned long offset, 2162 size_t size, 2163 enum dma_data_direction direction) 2164 { 2165 if (dev->dma_ops) 2166 return dev->dma_ops->map_page(dev, page, offset, size, direction); 2167 return dma_map_page(dev->dma_device, page, offset, size, direction); 2168 } 2169 2170 /** 2171 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 2172 * @dev: The device for which the DMA address was created 2173 * @addr: The DMA address 2174 * @size: The size of the region in bytes 2175 * @direction: The direction of the DMA 2176 */ 2177 static inline void ib_dma_unmap_page(struct ib_device *dev, 2178 u64 addr, size_t size, 2179 enum dma_data_direction direction) 2180 { 2181 if (dev->dma_ops) 2182 dev->dma_ops->unmap_page(dev, addr, size, direction); 2183 else 2184 dma_unmap_page(dev->dma_device, addr, size, direction); 2185 } 2186 2187 /** 2188 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 2189 * @dev: The device for which the DMA addresses are to be created 2190 * @sg: The array of scatter/gather entries 2191 * @nents: The number of scatter/gather entries 2192 * @direction: The direction of the DMA 2193 */ 2194 static inline int ib_dma_map_sg(struct ib_device *dev, 2195 struct scatterlist *sg, int nents, 2196 enum dma_data_direction direction) 2197 { 2198 if (dev->dma_ops) 2199 return dev->dma_ops->map_sg(dev, sg, nents, direction); 2200 return dma_map_sg(dev->dma_device, sg, nents, direction); 2201 } 2202 2203 /** 2204 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 2205 * @dev: The device for which the DMA addresses were created 2206 * @sg: The array of scatter/gather entries 2207 * @nents: The number of scatter/gather entries 2208 * @direction: The direction of the DMA 2209 */ 2210 static inline void ib_dma_unmap_sg(struct ib_device *dev, 2211 struct scatterlist *sg, int nents, 2212 enum dma_data_direction direction) 2213 { 2214 if (dev->dma_ops) 2215 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 2216 else 2217 dma_unmap_sg(dev->dma_device, sg, nents, direction); 2218 } 2219 2220 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 2221 struct scatterlist *sg, int nents, 2222 enum dma_data_direction direction, 2223 struct dma_attrs *attrs) 2224 { 2225 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 2226 } 2227 2228 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 2229 struct scatterlist *sg, int nents, 2230 enum dma_data_direction direction, 2231 struct dma_attrs *attrs) 2232 { 2233 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 2234 } 2235 /** 2236 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 2237 * @dev: The device for which the DMA addresses were created 2238 * @sg: The scatter/gather entry 2239 * 2240 * Note: this function is obsolete. To do: change all occurrences of 2241 * ib_sg_dma_address() into sg_dma_address(). 2242 */ 2243 static inline u64 ib_sg_dma_address(struct ib_device *dev, 2244 struct scatterlist *sg) 2245 { 2246 return sg_dma_address(sg); 2247 } 2248 2249 /** 2250 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 2251 * @dev: The device for which the DMA addresses were created 2252 * @sg: The scatter/gather entry 2253 * 2254 * Note: this function is obsolete. To do: change all occurrences of 2255 * ib_sg_dma_len() into sg_dma_len(). 2256 */ 2257 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 2258 struct scatterlist *sg) 2259 { 2260 return sg_dma_len(sg); 2261 } 2262 2263 /** 2264 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 2265 * @dev: The device for which the DMA address was created 2266 * @addr: The DMA address 2267 * @size: The size of the region in bytes 2268 * @dir: The direction of the DMA 2269 */ 2270 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 2271 u64 addr, 2272 size_t size, 2273 enum dma_data_direction dir) 2274 { 2275 if (dev->dma_ops) 2276 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 2277 else 2278 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 2279 } 2280 2281 /** 2282 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 2283 * @dev: The device for which the DMA address was created 2284 * @addr: The DMA address 2285 * @size: The size of the region in bytes 2286 * @dir: The direction of the DMA 2287 */ 2288 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 2289 u64 addr, 2290 size_t size, 2291 enum dma_data_direction dir) 2292 { 2293 if (dev->dma_ops) 2294 dev->dma_ops->sync_single_for_device(dev, addr, size, dir); 2295 else 2296 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 2297 } 2298 2299 /** 2300 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 2301 * @dev: The device for which the DMA address is requested 2302 * @size: The size of the region to allocate in bytes 2303 * @dma_handle: A pointer for returning the DMA address of the region 2304 * @flag: memory allocator flags 2305 */ 2306 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 2307 size_t size, 2308 u64 *dma_handle, 2309 gfp_t flag) 2310 { 2311 if (dev->dma_ops) 2312 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); 2313 else { 2314 dma_addr_t handle; 2315 void *ret; 2316 2317 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); 2318 *dma_handle = handle; 2319 return ret; 2320 } 2321 } 2322 2323 /** 2324 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 2325 * @dev: The device for which the DMA addresses were allocated 2326 * @size: The size of the region 2327 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 2328 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 2329 */ 2330 static inline void ib_dma_free_coherent(struct ib_device *dev, 2331 size_t size, void *cpu_addr, 2332 u64 dma_handle) 2333 { 2334 if (dev->dma_ops) 2335 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 2336 else 2337 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 2338 } 2339 2340 /** 2341 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use 2342 * by an HCA. 2343 * @pd: The protection domain associated assigned to the registered region. 2344 * @phys_buf_array: Specifies a list of physical buffers to use in the 2345 * memory region. 2346 * @num_phys_buf: Specifies the size of the phys_buf_array. 2347 * @mr_access_flags: Specifies the memory access rights. 2348 * @iova_start: The offset of the region's starting I/O virtual address. 2349 */ 2350 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, 2351 struct ib_phys_buf *phys_buf_array, 2352 int num_phys_buf, 2353 int mr_access_flags, 2354 u64 *iova_start); 2355 2356 /** 2357 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region. 2358 * Conceptually, this call performs the functions deregister memory region 2359 * followed by register physical memory region. Where possible, 2360 * resources are reused instead of deallocated and reallocated. 2361 * @mr: The memory region to modify. 2362 * @mr_rereg_mask: A bit-mask used to indicate which of the following 2363 * properties of the memory region are being modified. 2364 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies 2365 * the new protection domain to associated with the memory region, 2366 * otherwise, this parameter is ignored. 2367 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this 2368 * field specifies a list of physical buffers to use in the new 2369 * translation, otherwise, this parameter is ignored. 2370 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this 2371 * field specifies the size of the phys_buf_array, otherwise, this 2372 * parameter is ignored. 2373 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this 2374 * field specifies the new memory access rights, otherwise, this 2375 * parameter is ignored. 2376 * @iova_start: The offset of the region's starting I/O virtual address. 2377 */ 2378 int ib_rereg_phys_mr(struct ib_mr *mr, 2379 int mr_rereg_mask, 2380 struct ib_pd *pd, 2381 struct ib_phys_buf *phys_buf_array, 2382 int num_phys_buf, 2383 int mr_access_flags, 2384 u64 *iova_start); 2385 2386 /** 2387 * ib_query_mr - Retrieves information about a specific memory region. 2388 * @mr: The memory region to retrieve information about. 2389 * @mr_attr: The attributes of the specified memory region. 2390 */ 2391 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); 2392 2393 /** 2394 * ib_dereg_mr - Deregisters a memory region and removes it from the 2395 * HCA translation table. 2396 * @mr: The memory region to deregister. 2397 * 2398 * This function can fail, if the memory region has memory windows bound to it. 2399 */ 2400 int ib_dereg_mr(struct ib_mr *mr); 2401 2402 2403 /** 2404 * ib_create_mr - Allocates a memory region that may be used for 2405 * signature handover operations. 2406 * @pd: The protection domain associated with the region. 2407 * @mr_init_attr: memory region init attributes. 2408 */ 2409 struct ib_mr *ib_create_mr(struct ib_pd *pd, 2410 struct ib_mr_init_attr *mr_init_attr); 2411 2412 /** 2413 * ib_destroy_mr - Destroys a memory region that was created using 2414 * ib_create_mr and removes it from HW translation tables. 2415 * @mr: The memory region to destroy. 2416 * 2417 * This function can fail, if the memory region has memory windows bound to it. 2418 */ 2419 int ib_destroy_mr(struct ib_mr *mr); 2420 2421 /** 2422 * ib_alloc_fast_reg_mr - Allocates memory region usable with the 2423 * IB_WR_FAST_REG_MR send work request. 2424 * @pd: The protection domain associated with the region. 2425 * @max_page_list_len: requested max physical buffer list length to be 2426 * used with fast register work requests for this MR. 2427 */ 2428 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len); 2429 2430 /** 2431 * ib_alloc_fast_reg_page_list - Allocates a page list array 2432 * @device - ib device pointer. 2433 * @page_list_len - size of the page list array to be allocated. 2434 * 2435 * This allocates and returns a struct ib_fast_reg_page_list * and a 2436 * page_list array that is at least page_list_len in size. The actual 2437 * size is returned in max_page_list_len. The caller is responsible 2438 * for initializing the contents of the page_list array before posting 2439 * a send work request with the IB_WC_FAST_REG_MR opcode. 2440 * 2441 * The page_list array entries must be translated using one of the 2442 * ib_dma_*() functions just like the addresses passed to 2443 * ib_map_phys_fmr(). Once the ib_post_send() is issued, the struct 2444 * ib_fast_reg_page_list must not be modified by the caller until the 2445 * IB_WC_FAST_REG_MR work request completes. 2446 */ 2447 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list( 2448 struct ib_device *device, int page_list_len); 2449 2450 /** 2451 * ib_free_fast_reg_page_list - Deallocates a previously allocated 2452 * page list array. 2453 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated. 2454 */ 2455 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list); 2456 2457 /** 2458 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 2459 * R_Key and L_Key. 2460 * @mr - struct ib_mr pointer to be updated. 2461 * @newkey - new key to be used. 2462 */ 2463 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 2464 { 2465 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 2466 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 2467 } 2468 2469 /** 2470 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 2471 * for calculating a new rkey for type 2 memory windows. 2472 * @rkey - the rkey to increment. 2473 */ 2474 static inline u32 ib_inc_rkey(u32 rkey) 2475 { 2476 const u32 mask = 0x000000ff; 2477 return ((rkey + 1) & mask) | (rkey & ~mask); 2478 } 2479 2480 /** 2481 * ib_alloc_mw - Allocates a memory window. 2482 * @pd: The protection domain associated with the memory window. 2483 * @type: The type of the memory window (1 or 2). 2484 */ 2485 struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); 2486 2487 /** 2488 * ib_bind_mw - Posts a work request to the send queue of the specified 2489 * QP, which binds the memory window to the given address range and 2490 * remote access attributes. 2491 * @qp: QP to post the bind work request on. 2492 * @mw: The memory window to bind. 2493 * @mw_bind: Specifies information about the memory window, including 2494 * its address range, remote access rights, and associated memory region. 2495 * 2496 * If there is no immediate error, the function will update the rkey member 2497 * of the mw parameter to its new value. The bind operation can still fail 2498 * asynchronously. 2499 */ 2500 static inline int ib_bind_mw(struct ib_qp *qp, 2501 struct ib_mw *mw, 2502 struct ib_mw_bind *mw_bind) 2503 { 2504 /* XXX reference counting in corresponding MR? */ 2505 return mw->device->bind_mw ? 2506 mw->device->bind_mw(qp, mw, mw_bind) : 2507 -ENOSYS; 2508 } 2509 2510 /** 2511 * ib_dealloc_mw - Deallocates a memory window. 2512 * @mw: The memory window to deallocate. 2513 */ 2514 int ib_dealloc_mw(struct ib_mw *mw); 2515 2516 /** 2517 * ib_alloc_fmr - Allocates a unmapped fast memory region. 2518 * @pd: The protection domain associated with the unmapped region. 2519 * @mr_access_flags: Specifies the memory access rights. 2520 * @fmr_attr: Attributes of the unmapped region. 2521 * 2522 * A fast memory region must be mapped before it can be used as part of 2523 * a work request. 2524 */ 2525 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 2526 int mr_access_flags, 2527 struct ib_fmr_attr *fmr_attr); 2528 2529 /** 2530 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 2531 * @fmr: The fast memory region to associate with the pages. 2532 * @page_list: An array of physical pages to map to the fast memory region. 2533 * @list_len: The number of pages in page_list. 2534 * @iova: The I/O virtual address to use with the mapped region. 2535 */ 2536 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 2537 u64 *page_list, int list_len, 2538 u64 iova) 2539 { 2540 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 2541 } 2542 2543 /** 2544 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 2545 * @fmr_list: A linked list of fast memory regions to unmap. 2546 */ 2547 int ib_unmap_fmr(struct list_head *fmr_list); 2548 2549 /** 2550 * ib_dealloc_fmr - Deallocates a fast memory region. 2551 * @fmr: The fast memory region to deallocate. 2552 */ 2553 int ib_dealloc_fmr(struct ib_fmr *fmr); 2554 2555 /** 2556 * ib_attach_mcast - Attaches the specified QP to a multicast group. 2557 * @qp: QP to attach to the multicast group. The QP must be type 2558 * IB_QPT_UD. 2559 * @gid: Multicast group GID. 2560 * @lid: Multicast group LID in host byte order. 2561 * 2562 * In order to send and receive multicast packets, subnet 2563 * administration must have created the multicast group and configured 2564 * the fabric appropriately. The port associated with the specified 2565 * QP must also be a member of the multicast group. 2566 */ 2567 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2568 2569 /** 2570 * ib_detach_mcast - Detaches the specified QP from a multicast group. 2571 * @qp: QP to detach from the multicast group. 2572 * @gid: Multicast group GID. 2573 * @lid: Multicast group LID in host byte order. 2574 */ 2575 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2576 2577 /** 2578 * ib_alloc_xrcd - Allocates an XRC domain. 2579 * @device: The device on which to allocate the XRC domain. 2580 */ 2581 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); 2582 2583 /** 2584 * ib_dealloc_xrcd - Deallocates an XRC domain. 2585 * @xrcd: The XRC domain to deallocate. 2586 */ 2587 int ib_dealloc_xrcd(struct ib_xrcd *xrcd); 2588 2589 struct ib_flow *ib_create_flow(struct ib_qp *qp, 2590 struct ib_flow_attr *flow_attr, int domain); 2591 int ib_destroy_flow(struct ib_flow *flow_id); 2592 2593 static inline int ib_check_mr_access(int flags) 2594 { 2595 /* 2596 * Local write permission is required if remote write or 2597 * remote atomic permission is also requested. 2598 */ 2599 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 2600 !(flags & IB_ACCESS_LOCAL_WRITE)) 2601 return -EINVAL; 2602 2603 return 0; 2604 } 2605 2606 /** 2607 * ib_check_mr_status: lightweight check of MR status. 2608 * This routine may provide status checks on a selected 2609 * ib_mr. first use is for signature status check. 2610 * 2611 * @mr: A memory region. 2612 * @check_mask: Bitmask of which checks to perform from 2613 * ib_mr_status_check enumeration. 2614 * @mr_status: The container of relevant status checks. 2615 * failed checks will be indicated in the status bitmask 2616 * and the relevant info shall be in the error item. 2617 */ 2618 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 2619 struct ib_mr_status *mr_status); 2620 2621 #endif /* IB_VERBS_H */ 2622