/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _QED_RDMA_IF_H
#define _QED_RDMA_IF_H
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>

#define QED_RDMA_MAX_CNQ_SIZE	(0xFFFF)

/* rdma interface */

enum qed_roce_qp_state {
	QED_ROCE_QP_STATE_RESET,
	QED_ROCE_QP_STATE_INIT,
	QED_ROCE_QP_STATE_RTR,
	QED_ROCE_QP_STATE_RTS,
	QED_ROCE_QP_STATE_SQD,
	QED_ROCE_QP_STATE_ERR,
	QED_ROCE_QP_STATE_SQE
};

enum qed_rdma_tid_type {
	QED_RDMA_TID_REGISTERED_MR,
	QED_RDMA_TID_FMR,
	QED_RDMA_TID_MW
};

struct qed_rdma_events {
	void *context;
	void (*affiliated_event)(void *context, u8 fw_event_code,
				 void *fw_handle);
	void (*unaffiliated_event)(void *context, u8 event_code);
};

struct qed_rdma_device {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;

	u64 node_guid;
	u64 sys_image_guid;

	u8 max_cnq;
	u8 max_sge;
	u8 max_srq_sge;
	u16 max_inline;
	u32 max_wqe;
	u32 max_srq_wqe;
	u8 max_qp_resp_rd_atomic_resc;
	u8 max_qp_req_rd_atomic_resc;
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_srq;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_fmr;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u16 max_srq_wr;
	u8 max_stats_queues;
	u32 dev_caps;

/* Ability to support RNR-NAK generation */
#define QED_RDMA_DEV_CAP_RNR_NAK_MASK			0x1
#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
/* Ability to support shutdown port */
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK		0x1
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT		1
/* Ability to support port active event */
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT	2
/* Ability to support port change event */
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT	3
/* Ability to support system image GUID */
#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT		4
/* Ability to support bad P_Key counter */
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK		0x1
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT		5
/* Ability to support atomic operations */
#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT		6
#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT		7
/* Ability to support modifying the maximum number of
 * outstanding work requests per QP
 */
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK		0x1
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT		8
/* Ability to support automatic path migration */
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK		0x1
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT		9
/* Ability to support the base memory management extensions */
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK		0x1
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK		0x1
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT		11
/* Ability to support multiple page sizes per memory region */
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK	0x1
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT	12
/* Ability to support block list physical buffer list */
#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK		0x1
#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT		13
/* Ability to support zero based virtual addresses */
#define QED_RDMA_DEV_CAP_ZBVA_MASK			0x1
#define QED_RDMA_DEV_CAP_ZBVA_SHIFT			14
/* Ability to support local invalidate fencing */
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK		0x1
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
/* Ability to support Loopback on QP */
#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK		0x1
#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT		16
	u64 page_size_caps;
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;
};
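/* Example (illustrative sketch): the dev_caps word above uses the usual
 * MASK/SHIFT pairs, so a capability bit can be tested as:
 *
 *	struct qed_rdma_device *dev = ops->rdma_query_device(rdma_cxt);
 *	bool atomic_ops = (dev->dev_caps >> QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *			  QED_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 *
 * where 'ops' and 'rdma_cxt' are assumed to come from qed_get_rdma_ops() and
 * the rdma_get_rdma_ctx() callback declared later in this header.
 */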
enum qed_port_state {
	QED_RDMA_PORT_UP,
	QED_RDMA_PORT_DOWN,
};

enum qed_roce_capability {
	QED_ROCE_V1 = 1 << 0,
	QED_ROCE_V2 = 1 << 1,
};

struct qed_rdma_port {
	enum qed_port_state port_state;
	int link_speed;
	u64 max_msg_size;
	u8 source_gid_table_len;
	void *source_gid_table_ptr;
	u8 pkey_table_len;
	void *pkey_table_ptr;
	u32 pkey_bad_counter;
	enum qed_roce_capability capability;
};

struct qed_rdma_cnq_params {
	u8 num_pbl_pages;
	u64 pbl_ptr;
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum qed_rdma_cq_mode {
	QED_RDMA_CQ_MODE_16_BITS,
	QED_RDMA_CQ_MODE_32_BITS,
};
struct qed_roce_dcqcn_params {
	u8 notification_point;
	u8 reaction_point;

	/* fields for notification point */
	u32 cnp_send_timeout;

	/* fields for reaction point */
	u32 rl_bc_rate;
	u16 rl_max_rate;
	u16 rl_r_ai;
	u16 rl_r_hai;
	u16 dcqcn_g;
	u32 dcqcn_k_us;
	u32 dcqcn_timeout_us;
};

struct qed_rdma_start_in_params {
	struct qed_rdma_events *events;
	struct qed_rdma_cnq_params cnq_pbl_list[128];
	u8 desired_cnq;
	enum qed_rdma_cq_mode cq_mode;
	struct qed_roce_dcqcn_params dcqcn_params;
	u16 max_mtu;
	u8 mac_addr[ETH_ALEN];
	u8 iwarp_flags;
};

struct qed_rdma_add_user_out_params {
	u16 dpi;
	void __iomem *dpi_addr;
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 wid_count;
};

enum roce_mode {
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

union qed_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};

struct qed_rdma_register_tid_in_params {
	u32 itid;
	enum qed_rdma_tid_type tid_type;
	u8 key;
	u16 pd;
	bool local_read;
	bool local_write;
	bool remote_read;
	bool remote_write;
	bool remote_atomic;
	bool mw_bind;
	u64 pbl_ptr;
	bool pbl_two_level;
	u8 pbl_page_size_log;
	u8 page_size_log;
	u32 fbo;
	u64 length;
	u64 vaddr;
	bool zbva;
	bool phy_mr;
	bool dma_mr;

	bool dif_enabled;
	u64 dif_error_addr;
};

struct qed_rdma_create_cq_in_params {
	u32 cq_handle_lo;
	u32 cq_handle_hi;
	u32 cq_size;
	u16 dpi;
	bool pbl_two_level;
	u64 pbl_ptr;
	u16 pbl_num_pages;
	u8 pbl_page_size_log;
	u8 cnq_id;
	u16 int_timeout;
};

struct qed_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;
};

struct qed_rdma_destroy_cq_in_params {
	u16 icid;
};

struct qed_rdma_destroy_cq_out_params {
	u16 num_cq_notif;
};

struct qed_rdma_create_qp_in_params {
	u32 qp_handle_lo;
	u32 qp_handle_hi;
	u32 qp_handle_async_lo;
	u32 qp_handle_async_hi;
	bool use_srq;
	bool signal_all;
	bool fmr_and_reserved_lkey;
	u16 pd;
	u16 dpi;
	u16 sq_cq_id;
	u16 sq_num_pages;
	u64 sq_pbl_ptr;
	u8 max_sq_sges;
	u16 rq_cq_id;
	u16 rq_num_pages;
	u64 rq_pbl_ptr;
	u16 srq_id;
	u8 stats_queue;
};

struct qed_rdma_create_qp_out_params {
	u32 qp_id;
	u16 icid;
	void *rq_pbl_virt;
	dma_addr_t rq_pbl_phys;
	void *sq_pbl_virt;
	dma_addr_t sq_pbl_phys;
};

struct qed_rdma_modify_qp_in_params {
	u32 modify_flags;
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK			0x1
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT		0
#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT			1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK		0x1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT		2
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT			3
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT		4
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT			5
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT			6
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK		0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT	7
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK	0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT	8
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT		9
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT		10
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT		11
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT	12
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK	0x1
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT	13
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT		14

	enum qed_roce_qp_state new_state;
	u16 pkey;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	u32 dest_qp;
	bool lb_indication;
	u16 mtu;
	u8 traffic_class_tos;
	u8 hop_limit_ttl;
	u32 flow_label;
	union qed_gid sgid;
	union qed_gid dgid;
	u16 udp_src_port;

	u16 vlan_id;

	u32 rq_psn;
	u32 sq_psn;
	u8 max_rd_atomic_resp;
	u8 max_rd_atomic_req;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
	bool use_local_mac;
	enum roce_mode roce_mode;
};
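/* Example (illustrative sketch): modify_flags in the struct above follows the
 * same MASK/SHIFT convention as dev_caps, so marking a field as valid before
 * calling the rdma_modify_qp callback declared later in this header could
 * look like:
 *
 *	params->new_state = QED_ROCE_QP_STATE_RTR;
 *	params->modify_flags |= QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
 *				QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT;
 *
 * 'params' is a hypothetical struct qed_rdma_modify_qp_in_params pointer used
 * only for illustration.
 */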
struct qed_rdma_query_qp_out_params {
	enum qed_roce_qp_state state;
	u32 rq_psn;
	u32 sq_psn;
	bool draining;
	u16 mtu;
	u32 dest_qp;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	union qed_gid sgid;
	union qed_gid dgid;
	u32 flow_label;
	u8 hop_limit_ttl;
	u8 traffic_class_tos;
	u32 timeout;
	u8 rnr_retry;
	u8 retry_cnt;
	u8 min_rnr_nak_timer;
	u16 pkey_index;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	bool sqd_async;
};

struct qed_rdma_create_srq_out_params {
	u16 srq_id;
};

struct qed_rdma_destroy_srq_in_params {
	u16 srq_id;
};

struct qed_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
};

struct qed_rdma_stats_out_params {
	u64 sent_bytes;
	u64 sent_pkts;
	u64 rcv_bytes;
	u64 rcv_pkts;
};

struct qed_rdma_counters_out_params {
	u64 pd_count;
	u64 max_pd;
	u64 dpi_count;
	u64 max_dpi;
	u64 cq_count;
	u64 max_cq;
	u64 qp_count;
	u64 max_qp;
	u64 tid_count;
	u64 max_tid;
};

#define QED_ROCE_TX_HEAD_FAILURE	(1)
#define QED_ROCE_TX_FRAG_FAILURE	(2)

enum qed_iwarp_event_type {
	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
	QED_IWARP_EVENT_ACTIVE_COMPLETE,  /* Active side reply received */
	QED_IWARP_EVENT_DISCONNECT,
	QED_IWARP_EVENT_CLOSE,
	QED_IWARP_EVENT_IRQ_FULL,
	QED_IWARP_EVENT_RQ_EMPTY,
	QED_IWARP_EVENT_LLP_TIMEOUT,
	QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	QED_IWARP_EVENT_CQ_OVERFLOW,
	QED_IWARP_EVENT_QP_CATASTROPHIC,
	QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
	QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	QED_IWARP_EVENT_TERMINATE_RECEIVED,
	QED_IWARP_EVENT_SRQ_LIMIT,
	QED_IWARP_EVENT_SRQ_EMPTY,
};

enum qed_tcp_ip_version {
	QED_TCP_IPV4,
	QED_TCP_IPV6,
};

struct qed_iwarp_cm_info {
	enum qed_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	u8 ord;
	u8 ird;
	u16 private_data_len;
	const void *private_data;
};

struct qed_iwarp_cm_event_params {
	enum qed_iwarp_event_type event;
	const struct qed_iwarp_cm_info *cm_info;
	void *ep_context;	/* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler) (void *context,
				    struct qed_iwarp_cm_event_params *event);

struct qed_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct qed_rdma_qp *qp;
	struct qed_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
};

struct qed_iwarp_connect_out {
	void *ep_context;
};

struct qed_iwarp_listen_in {
	iwarp_event_handler event_cb;
	void *cb_context;	/* passed to event_cb */
	u32 max_backlog;
	enum qed_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct qed_iwarp_listen_out {
	void *handle;
};

struct qed_iwarp_accept_in {
	void *ep_context;
	void *cb_context;
	struct qed_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct qed_iwarp_reject_in {
	void *ep_context;
	void *cb_context;
	const void *private_data;
	u16 private_data_len;
};

struct qed_iwarp_send_rtr_in {
	void *ep_context;
};

struct qed_roce_ll2_header {
	void *vaddr;
	dma_addr_t baddr;
	size_t len;
};

struct qed_roce_ll2_buffer {
	dma_addr_t baddr;
	size_t len;
};

struct qed_roce_ll2_packet {
	struct qed_roce_ll2_header header;
	int n_seg;
	struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
	int roce_mode;
	enum qed_ll2_tx_dest tx_dest;
};

enum qed_rdma_type {
	QED_RDMA_TYPE_ROCE,
	QED_RDMA_TYPE_IWARP
};

struct qed_dev_rdma_info {
	struct qed_dev_info common;
	enum qed_rdma_type rdma_type;
	u8 user_dpm_enabled;
};
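/* Example (illustrative sketch): an iwarp_event_handler registered through
 * qed_iwarp_connect_in or qed_iwarp_listen_in receives a
 * struct qed_iwarp_cm_event_params and typically switches on the reported
 * event type:
 *
 *	static int my_iwarp_event_cb(void *context,
 *				     struct qed_iwarp_cm_event_params *event)
 *	{
 *		switch (event->event) {
 *		case QED_IWARP_EVENT_MPA_REQUEST:
 *			return handle_mpa_request(context, event->ep_context,
 *						  event->cm_info);
 *		case QED_IWARP_EVENT_DISCONNECT:
 *			return handle_disconnect(context, event->status);
 *		default:
 *			return 0;
 *		}
 *	}
 *
 * 'my_iwarp_event_cb', 'handle_mpa_request' and 'handle_disconnect' are
 * hypothetical names; on QED_IWARP_EVENT_MPA_REQUEST the ep_context is the
 * handle later given to the iwarp_accept/iwarp_reject callbacks below.
 */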
struct qed_rdma_ops {
	const struct qed_common_ops *common;

	int (*fill_dev_info)(struct qed_dev *cdev,
			     struct qed_dev_rdma_info *info);
	void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);

	int (*rdma_init)(struct qed_dev *dev,
			 struct qed_rdma_start_in_params *iparams);

	int (*rdma_add_user)(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *oparams);

	void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
	int (*rdma_stop)(void *rdma_cxt);
	struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
	struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
	int (*rdma_get_start_sb)(struct qed_dev *cdev);
	int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
	void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
	int (*rdma_get_rdma_int)(struct qed_dev *cdev,
				 struct qed_int_info *info);
	int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
	int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
	void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
	int (*rdma_create_cq)(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid);
	int (*rdma_destroy_cq)(void *rdma_cxt,
			       struct qed_rdma_destroy_cq_in_params *iparams,
			       struct qed_rdma_destroy_cq_out_params *oparams);
	struct qed_rdma_qp *
	(*rdma_create_qp)(void *rdma_cxt,
			  struct qed_rdma_create_qp_in_params *iparams,
			  struct qed_rdma_create_qp_out_params *oparams);

	int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *iparams);

	int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *oparams);
	int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);

	int
	(*rdma_register_tid)(void *rdma_cxt,
			     struct qed_rdma_register_tid_in_params *iparams);

	int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
	int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
	void (*rdma_free_tid)(void *rdma_cxt, u32 itid);

	int (*rdma_create_srq)(void *rdma_cxt,
			       struct qed_rdma_create_srq_in_params *iparams,
			       struct qed_rdma_create_srq_out_params *oparams);
	int (*rdma_destroy_srq)(void *rdma_cxt,
				struct qed_rdma_destroy_srq_in_params *iparams);
	int (*rdma_modify_srq)(void *rdma_cxt,
			       struct qed_rdma_modify_srq_in_params *iparams);

	int (*ll2_acquire_connection)(void *rdma_cxt,
				      struct qed_ll2_acquire_data *data);

	int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
	int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
	void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);

	int (*ll2_prepare_tx_packet)(void *rdma_cxt,
				     u8 connection_handle,
				     struct qed_ll2_tx_pkt_info *pkt,
				     bool notify_fw);

	int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
					     u8 connection_handle,
					     dma_addr_t addr,
					     u16 nbytes);
	int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
				  dma_addr_t addr, u16 buf_len, void *cookie,
				  u8 notify_fw);
	int (*ll2_get_stats)(void *rdma_cxt,
			     u8 connection_handle,
			     struct qed_ll2_stats *p_stats);
	int (*ll2_set_mac_filter)(struct qed_dev *cdev,
				  u8 *old_mac_address, u8 *new_mac_address);

	int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);

	int (*iwarp_connect)(void *rdma_cxt,
			     struct qed_iwarp_connect_in *iparams,
			     struct qed_iwarp_connect_out *oparams);

	int (*iwarp_create_listen)(void *rdma_cxt,
				   struct qed_iwarp_listen_in *iparams,
				   struct qed_iwarp_listen_out *oparams);

	int (*iwarp_accept)(void *rdma_cxt,
			    struct qed_iwarp_accept_in *iparams);

	int (*iwarp_reject)(void *rdma_cxt,
			    struct qed_iwarp_reject_in *iparams);

	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);

	int (*iwarp_send_rtr)(void *rdma_cxt,
			      struct qed_iwarp_send_rtr_in *iparams);
};

const struct qed_rdma_ops *qed_get_rdma_ops(void);

#endif
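/* Example (illustrative sketch of how an upper-layer driver might bind to this
 * interface; 'cdev' is a hypothetical struct qed_dev pointer obtained from the
 * qed core driver, and error handling is omitted):
 *
 *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *	struct qed_dev_rdma_info info;
 *	struct qed_rdma_device *dev;
 *	void *rdma_cxt;
 *
 *	ops->fill_dev_info(cdev, &info);
 *	rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *	dev = ops->rdma_query_device(rdma_cxt);
 *
 * This only reflects the call signatures declared above, not a verified
 * initialization sequence.
 */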