/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_RDMA_IF_H
#define _QED_RDMA_IF_H
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>

#define QED_RDMA_MAX_CNQ_SIZE               (0xFFFF)

/* rdma interface */

enum qed_roce_qp_state {
	QED_ROCE_QP_STATE_RESET,
	QED_ROCE_QP_STATE_INIT,
	QED_ROCE_QP_STATE_RTR,
	QED_ROCE_QP_STATE_RTS,
	QED_ROCE_QP_STATE_SQD,
	QED_ROCE_QP_STATE_ERR,
	QED_ROCE_QP_STATE_SQE
};

enum qed_rdma_qp_type {
	QED_RDMA_QP_TYPE_RC,
	QED_RDMA_QP_TYPE_XRC_INI,
	QED_RDMA_QP_TYPE_XRC_TGT,
	QED_RDMA_QP_TYPE_INVAL = 0xffff,
};

enum qed_rdma_tid_type {
	QED_RDMA_TID_REGISTERED_MR,
	QED_RDMA_TID_FMR,
	QED_RDMA_TID_MW
};

struct qed_rdma_events {
	void *context;
	void (*affiliated_event)(void *context, u8 fw_event_code,
				 void *fw_handle);
	void (*unaffiliated_event)(void *context, u8 event_code);
};

struct qed_rdma_device {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;

	u64 node_guid;
	u64 sys_image_guid;

	u8 max_cnq;
	u8 max_sge;
	u8 max_srq_sge;
	u16 max_inline;
	u32 max_wqe;
	u32 max_srq_wqe;
	u8 max_qp_resp_rd_atomic_resc;
	u8 max_qp_req_rd_atomic_resc;
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_srq;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u16 max_srq_wr;
	u8 max_stats_queues;
	u32 dev_caps;

	/* Ability to support RNR-NAK generation */

#define QED_RDMA_DEV_CAP_RNR_NAK_MASK                           0x1
#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT                          0
	/* Ability to support shutdown port */
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK                     0x1
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT                    1
	/* Ability to support port active event */
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK                 0x1
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT                2
	/* Ability to support port change event */
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK                 0x1
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT                3
	/* Ability to support system image GUID */
#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK                         0x1
#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT                        4
	/* Ability to support a bad P_Key counter */
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK                      0x1
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT                     5
	/* Ability to support atomic operations */
#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK                         0x1
#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT                        6
	/* Ability to support CQ resizing */
#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK                         0x1
#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT                        7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK                     0x1
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT                    8
	/* Ability to support automatic path migration */
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK                     0x1
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT                    9
	/* Ability to support the base memory management extensions */
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK                   0x1
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT                  10
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK                    0x1
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT                   11
	/* Ability to support multiple page sizes per memory region */
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK             0x1
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT            12
	/* Ability to support block list physical buffer list */
#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK                        0x1
#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT                       13
	/* Ability to support zero based virtual addresses */
#define QED_RDMA_DEV_CAP_ZBVA_MASK                              0x1
#define QED_RDMA_DEV_CAP_ZBVA_SHIFT                             14
	/* Ability to support local invalidate fencing */
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK                   0x1
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT                  15
	/* Ability to support loopback on QP */
#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK                      0x1
#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT                     16
	u64 page_size_caps;
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;
};
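
/* A minimal sketch (hypothetical helper, not declared by this interface):
 * the dev_caps word above is decoded with its MASK/SHIFT pairs, e.g. to
 * test whether atomic operations are supported.
 */
static inline bool qed_rdma_dev_cap_atomic_op(const struct qed_rdma_device *dev)
{
	/* Shift the capability word down and isolate the single-bit field */
	return !!((dev->dev_caps >> QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
		  QED_RDMA_DEV_CAP_ATOMIC_OP_MASK);
}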

enum qed_port_state {
	QED_RDMA_PORT_UP,
	QED_RDMA_PORT_DOWN,
};

enum qed_roce_capability {
	QED_ROCE_V1 = 1 << 0,
	QED_ROCE_V2 = 1 << 1,
};

struct qed_rdma_port {
	enum qed_port_state port_state;
	int link_speed;
	u64 max_msg_size;
	u8 source_gid_table_len;
	void *source_gid_table_ptr;
	u8 pkey_table_len;
	void *pkey_table_ptr;
	u32 pkey_bad_counter;
	enum qed_roce_capability capability;
};

struct qed_rdma_cnq_params {
	u8 num_pbl_pages;
	u64 pbl_ptr;
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum qed_rdma_cq_mode {
	QED_RDMA_CQ_MODE_16_BITS,
	QED_RDMA_CQ_MODE_32_BITS,
};
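
/* A minimal sketch (hypothetical helper, not part of this interface) of
 * picking the CQ mode per the comment above: 32-bit doorbells on 64-bit
 * machines, 16-bit doorbells on 32-bit machines. Assumes the kernel's
 * IS_ENABLED()/CONFIG_64BIT machinery from the surrounding build.
 */
static inline enum qed_rdma_cq_mode qed_rdma_default_cq_mode(void)
{
	return IS_ENABLED(CONFIG_64BIT) ? QED_RDMA_CQ_MODE_32_BITS :
					  QED_RDMA_CQ_MODE_16_BITS;
}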

struct qed_roce_dcqcn_params {
	u8 notification_point;
	u8 reaction_point;

	/* fields for notification point */
	u32 cnp_send_timeout;

	/* fields for reaction point */
	u32 rl_bc_rate;
	u16 rl_max_rate;
	u16 rl_r_ai;
	u16 rl_r_hai;
	u16 dcqcn_g;
	u32 dcqcn_k_us;
	u32 dcqcn_timeout_us;
};

struct qed_rdma_start_in_params {
	struct qed_rdma_events *events;
	struct qed_rdma_cnq_params cnq_pbl_list[128];
	u8 desired_cnq;
	enum qed_rdma_cq_mode cq_mode;
	struct qed_roce_dcqcn_params dcqcn_params;
	u16 max_mtu;
	u8 mac_addr[ETH_ALEN];
	u8 iwarp_flags;
};

struct qed_rdma_add_user_out_params {
	u16 dpi;
	void __iomem *dpi_addr;
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 wid_count;
};

enum roce_mode {
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

union qed_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};

struct qed_rdma_register_tid_in_params {
	u32 itid;
	enum qed_rdma_tid_type tid_type;
	u8 key;
	u16 pd;
	bool local_read;
	bool local_write;
	bool remote_read;
	bool remote_write;
	bool remote_atomic;
	bool mw_bind;
	u64 pbl_ptr;
	bool pbl_two_level;
	u8 pbl_page_size_log;
	u8 page_size_log;
	u32 fbo;
	u64 length;
	u64 vaddr;
	bool zbva;
	bool phy_mr;
	bool dma_mr;

	bool dif_enabled;
	u64 dif_error_addr;
};

struct qed_rdma_create_cq_in_params {
	u32 cq_handle_lo;
	u32 cq_handle_hi;
	u32 cq_size;
	u16 dpi;
	bool pbl_two_level;
	u64 pbl_ptr;
	u16 pbl_num_pages;
	u8 pbl_page_size_log;
	u8 cnq_id;
	u16 int_timeout;
};

struct qed_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;

	/* XRC related only */
	bool reserved_key_en;
	bool is_xrc;
	u32 cq_cid;
	u16 xrcd_id;
};

struct qed_rdma_destroy_cq_in_params {
	u16 icid;
};

struct qed_rdma_destroy_cq_out_params {
	u16 num_cq_notif;
};

struct qed_rdma_create_qp_in_params {
	u32 qp_handle_lo;
	u32 qp_handle_hi;
	u32 qp_handle_async_lo;
	u32 qp_handle_async_hi;
	bool use_srq;
	bool signal_all;
	bool fmr_and_reserved_lkey;
	u16 pd;
	u16 dpi;
	u16 sq_cq_id;
	u16 sq_num_pages;
	u64 sq_pbl_ptr;
	u8 max_sq_sges;
	u16 rq_cq_id;
	u16 rq_num_pages;
	u64 rq_pbl_ptr;
	u16 srq_id;
	u16 xrcd_id;
	u8 stats_queue;
	enum qed_rdma_qp_type qp_type;
	u8 flags;
#define QED_ROCE_EDPM_MODE_MASK      0x1
#define QED_ROCE_EDPM_MODE_SHIFT     0
};
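
/* A minimal sketch (hypothetical helper, not part of this interface) showing
 * how the flags field above is packed with its MASK/SHIFT pair, here for the
 * EDPM mode bit.
 */
static inline void
qed_rdma_qp_params_set_edpm(struct qed_rdma_create_qp_in_params *p, bool edpm)
{
	/* Clear the single-bit field, then set it from the caller's choice */
	p->flags &= ~(QED_ROCE_EDPM_MODE_MASK << QED_ROCE_EDPM_MODE_SHIFT);
	p->flags |= (edpm ? 1 : 0) << QED_ROCE_EDPM_MODE_SHIFT;
}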

struct qed_rdma_create_qp_out_params {
	u32 qp_id;
	u16 icid;
	void *rq_pbl_virt;
	dma_addr_t rq_pbl_phys;
	void *sq_pbl_virt;
	dma_addr_t sq_pbl_phys;
};

struct qed_rdma_modify_qp_in_params {
	u32 modify_flags;
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

	enum qed_roce_qp_state new_state;
	u16 pkey;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	u32 dest_qp;
	bool lb_indication;
	u16 mtu;
	u8 traffic_class_tos;
	u8 hop_limit_ttl;
	u32 flow_label;
	union qed_gid sgid;
	union qed_gid dgid;
	u16 udp_src_port;

	u16 vlan_id;

	u32 rq_psn;
	u32 sq_psn;
	u8 max_rd_atomic_resp;
	u8 max_rd_atomic_req;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
	bool use_local_mac;
	enum roce_mode roce_mode;
};
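
/* A minimal sketch (hypothetical helper, not part of this interface): callers
 * mark which fields of a modify request are valid via modify_flags. For
 * example, moving a QP to RTS with a new SQ PSN could be expressed as below.
 */
static inline void
qed_rdma_modify_qp_set_rts(struct qed_rdma_modify_qp_in_params *p, u32 sq_psn)
{
	p->new_state = QED_ROCE_QP_STATE_RTS;
	p->sq_psn = sq_psn;
	/* Flag exactly the fields filled in above as valid */
	p->modify_flags |= (QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
			    QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
			   (QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK <<
			    QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT);
}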

struct qed_rdma_query_qp_out_params {
	enum qed_roce_qp_state state;
	u32 rq_psn;
	u32 sq_psn;
	bool draining;
	u16 mtu;
	u32 dest_qp;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	union qed_gid sgid;
	union qed_gid dgid;
	u32 flow_label;
	u8 hop_limit_ttl;
	u8 traffic_class_tos;
	u32 timeout;
	u8 rnr_retry;
	u8 retry_cnt;
	u8 min_rnr_nak_timer;
	u16 pkey_index;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	bool sqd_async;
};

struct qed_rdma_create_srq_out_params {
	u16 srq_id;
};

struct qed_rdma_destroy_srq_in_params {
	u16 srq_id;
	bool is_xrc;
};

struct qed_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
	bool is_xrc;
};

struct qed_rdma_stats_out_params {
	u64 sent_bytes;
	u64 sent_pkts;
	u64 rcv_bytes;
	u64 rcv_pkts;
};

struct qed_rdma_counters_out_params {
	u64 pd_count;
	u64 max_pd;
	u64 dpi_count;
	u64 max_dpi;
	u64 cq_count;
	u64 max_cq;
	u64 qp_count;
	u64 max_qp;
	u64 tid_count;
	u64 max_tid;
};

#define QED_ROCE_TX_HEAD_FAILURE        (1)
#define QED_ROCE_TX_FRAG_FAILURE        (2)

enum qed_iwarp_event_type {
	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
	QED_IWARP_EVENT_ACTIVE_COMPLETE,  /* Active side reply received */
	QED_IWARP_EVENT_DISCONNECT,
	QED_IWARP_EVENT_CLOSE,
	QED_IWARP_EVENT_IRQ_FULL,
	QED_IWARP_EVENT_RQ_EMPTY,
	QED_IWARP_EVENT_LLP_TIMEOUT,
	QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	QED_IWARP_EVENT_CQ_OVERFLOW,
	QED_IWARP_EVENT_QP_CATASTROPHIC,
	QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
	QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	QED_IWARP_EVENT_TERMINATE_RECEIVED,
	QED_IWARP_EVENT_SRQ_LIMIT,
	QED_IWARP_EVENT_SRQ_EMPTY,
};

enum qed_tcp_ip_version {
	QED_TCP_IPV4,
	QED_TCP_IPV6,
};

struct qed_iwarp_cm_info {
	enum qed_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	u8 ord;
	u8 ird;
	u16 private_data_len;
	const void *private_data;
};

struct qed_iwarp_cm_event_params {
	enum qed_iwarp_event_type event;
	const struct qed_iwarp_cm_info *cm_info;
	void *ep_context;	/* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler) (void *context,
				    struct qed_iwarp_cm_event_params *event);
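
/* A minimal sketch (hypothetical callback, not part of this interface) of an
 * iwarp_event_handler: the ep_context delivered with an MPA request is what
 * the upper layer later hands back through iwarp_accept/iwarp_reject.
 */
static inline int qed_iwarp_example_event_cb(void *context,
					     struct qed_iwarp_cm_event_params *event)
{
	switch (event->event) {
	case QED_IWARP_EVENT_MPA_REQUEST:
		/* Passive side: stash event->ep_context for a later accept */
		return 0;
	default:
		return 0;
	}
}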

struct qed_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct qed_rdma_qp *qp;
	struct qed_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
};

struct qed_iwarp_connect_out {
	void *ep_context;
};

struct qed_iwarp_listen_in {
	iwarp_event_handler event_cb;
	void *cb_context;	/* passed to event_cb */
	u32 max_backlog;
	enum qed_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct qed_iwarp_listen_out {
	void *handle;
};

struct qed_iwarp_accept_in {
	void *ep_context;
	void *cb_context;
	struct qed_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct qed_iwarp_reject_in {
	void *ep_context;
	void *cb_context;
	const void *private_data;
	u16 private_data_len;
};

struct qed_iwarp_send_rtr_in {
	void *ep_context;
};

struct qed_roce_ll2_header {
	void *vaddr;
	dma_addr_t baddr;
	size_t len;
};

struct qed_roce_ll2_buffer {
	dma_addr_t baddr;
	size_t len;
};

struct qed_roce_ll2_packet {
	struct qed_roce_ll2_header header;
	int n_seg;
	struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
	int roce_mode;
	enum qed_ll2_tx_dest tx_dest;
};

enum qed_rdma_type {
	QED_RDMA_TYPE_ROCE,
	QED_RDMA_TYPE_IWARP
};

struct qed_dev_rdma_info {
	struct qed_dev_info common;
	enum qed_rdma_type rdma_type;
	u8 user_dpm_enabled;
};

struct qed_rdma_ops {
	const struct qed_common_ops *common;

	int (*fill_dev_info)(struct qed_dev *cdev,
			     struct qed_dev_rdma_info *info);
	void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);

	int (*rdma_init)(struct qed_dev *dev,
			 struct qed_rdma_start_in_params *iparams);

	int (*rdma_add_user)(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *oparams);

	void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
	int (*rdma_stop)(void *rdma_cxt);
	struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
	struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
	int (*rdma_get_start_sb)(struct qed_dev *cdev);
	int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
	void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
	int (*rdma_get_rdma_int)(struct qed_dev *cdev,
				 struct qed_int_info *info);
	int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
	int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
	void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
	int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd);
	void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd);
	int (*rdma_create_cq)(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid);
	int (*rdma_destroy_cq)(void *rdma_cxt,
			       struct qed_rdma_destroy_cq_in_params *iparams,
			       struct qed_rdma_destroy_cq_out_params *oparams);
	struct qed_rdma_qp *
	(*rdma_create_qp)(void *rdma_cxt,
			  struct qed_rdma_create_qp_in_params *iparams,
			  struct qed_rdma_create_qp_out_params *oparams);

	int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *iparams);

	int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *oparams);
	int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);

	int
	(*rdma_register_tid)(void *rdma_cxt,
			     struct qed_rdma_register_tid_in_params *iparams);

	int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
	int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
	void (*rdma_free_tid)(void *rdma_cxt, u32 itid);

	int (*rdma_create_srq)(void *rdma_cxt,
			       struct qed_rdma_create_srq_in_params *iparams,
			       struct qed_rdma_create_srq_out_params *oparams);
	int (*rdma_destroy_srq)(void *rdma_cxt,
				struct qed_rdma_destroy_srq_in_params *iparams);
	int (*rdma_modify_srq)(void *rdma_cxt,
			       struct qed_rdma_modify_srq_in_params *iparams);

	int (*ll2_acquire_connection)(void *rdma_cxt,
				      struct qed_ll2_acquire_data *data);

	int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
	int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
	void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);

	int (*ll2_prepare_tx_packet)(void *rdma_cxt,
				     u8 connection_handle,
				     struct qed_ll2_tx_pkt_info *pkt,
				     bool notify_fw);

	int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
					     u8 connection_handle,
					     dma_addr_t addr,
					     u16 nbytes);
	int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
				  dma_addr_t addr, u16 buf_len, void *cookie,
				  u8 notify_fw);
	int (*ll2_get_stats)(void *rdma_cxt,
			     u8 connection_handle,
			     struct qed_ll2_stats *p_stats);
	int (*ll2_set_mac_filter)(struct qed_dev *cdev,
				  u8 *old_mac_address, u8 *new_mac_address);

	int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);

	int (*iwarp_connect)(void *rdma_cxt,
			     struct qed_iwarp_connect_in *iparams,
			     struct qed_iwarp_connect_out *oparams);

	int (*iwarp_create_listen)(void *rdma_cxt,
				   struct qed_iwarp_listen_in *iparams,
				   struct qed_iwarp_listen_out *oparams);

	int (*iwarp_accept)(void *rdma_cxt,
			    struct qed_iwarp_accept_in *iparams);

	int (*iwarp_reject)(void *rdma_cxt,
			    struct qed_iwarp_reject_in *iparams);

	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);

	int (*iwarp_send_rtr)(void *rdma_cxt,
			      struct qed_iwarp_send_rtr_in *iparams);
};

const struct qed_rdma_ops *qed_get_rdma_ops(void);
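
/* A minimal usage sketch (hypothetical; the probe flow and error codes are
 * assumed rather than taken from this header): an upper-layer driver obtains
 * the ops table, queries device info, and fetches the RDMA context that the
 * remaining callbacks expect as their first argument.
 */
static inline int qed_rdma_ops_usage_sketch(struct qed_dev *cdev)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_dev_rdma_info info;
	void *rdma_cxt;
	int rc;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		return rc;

	rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
	if (!rdma_cxt)
		return -EINVAL;

	/* rdma_cxt is then passed to rdma_alloc_pd(), rdma_create_cq(), ... */
	return 0;
}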

#endif