/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_DEV_API_H__
#define __PVRDMA_DEV_API_H__

#include <linux/types.h>

#include "pvrdma_verbs.h"

#define PVRDMA_VERSION			17
#define PVRDMA_BOARD_ID			1
#define PVRDMA_REV_ID			1

/*
 * Masks and accessors for the page directory, which is a two-level lookup:
 * page directory -> page table -> page. Only one directory for now, but we
 * could expand that easily. 9 bits for tables and 9 bits for pages give one
 * gigabyte (of 4 KiB pages) for memory regions and so forth.
 */

#define PVRDMA_PDIR_SHIFT		18
#define PVRDMA_PTABLE_SHIFT		9
#define PVRDMA_PAGE_DIR_DIR(x)		(((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
#define PVRDMA_PAGE_DIR_TABLE(x)	(((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
#define PVRDMA_PAGE_DIR_PAGE(x)		((x) & 0x1ff)
#define PVRDMA_PAGE_DIR_MAX_PAGES	(1 * 512 * 512)
#define PVRDMA_MAX_FAST_REG_PAGES	128
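
/*
 * Illustrative sketch only (not part of the device API; the function name
 * is hypothetical): how the accessors above decompose a linear page index
 * within a page-directory-backed object into its directory, table and
 * page slots.
 */
static inline void pvrdma_example_decode_page_idx(u64 idx, u32 *dir,
						  u32 *tbl, u32 *pg)
{
	*dir = PVRDMA_PAGE_DIR_DIR(idx);	/* bit 18: directory slot (only one used) */
	*tbl = PVRDMA_PAGE_DIR_TABLE(idx);	/* bits 9..17: page-table slot */
	*pg  = PVRDMA_PAGE_DIR_PAGE(idx);	/* bits 0..8: page slot */
}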

/*
 * Max MSI-X vectors.
 */

#define PVRDMA_MAX_INTERRUPTS	3

/* Register offsets within PCI resource on BAR1. */
#define PVRDMA_REG_VERSION	0x00	/* R: Version of device. */
#define PVRDMA_REG_DSRLOW	0x04	/* W: Device shared region low PA. */
#define PVRDMA_REG_DSRHIGH	0x08	/* W: Device shared region high PA. */
#define PVRDMA_REG_CTL		0x0c	/* W: PVRDMA_DEVICE_CTL */
#define PVRDMA_REG_REQUEST	0x10	/* W: Indicate device request. */
#define PVRDMA_REG_ERR		0x14	/* R: Device error. */
#define PVRDMA_REG_ICR		0x18	/* R: Interrupt cause. */
#define PVRDMA_REG_IMR		0x1c	/* R/W: Interrupt mask. */
#define PVRDMA_REG_MACL		0x20	/* R/W: MAC address low. */
#define PVRDMA_REG_MACH		0x24	/* R/W: MAC address high. */
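
/*
 * Illustrative sketch only (not part of this header): probing the BAR1
 * register space defined above. Assumes 'regs' is the ioremap()ed BAR1
 * mapping and that readl() from <linux/io.h> is available; the function
 * name is hypothetical.
 */
static inline bool pvrdma_example_version_ok(void __iomem *regs)
{
	u32 ver = readl(regs + PVRDMA_REG_VERSION);	/* R: device version */

	return ver >= PVRDMA_VERSION;	/* this header describes version 17 */
}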

/* Object flags. */
#define PVRDMA_CQ_FLAG_ARMED_SOL	BIT(0)	/* Armed for solicited-only. */
#define PVRDMA_CQ_FLAG_ARMED		BIT(1)	/* Armed. */
#define PVRDMA_MR_FLAG_DMA		BIT(0)	/* DMA region. */
#define PVRDMA_MR_FLAG_FRMR		BIT(1)	/* Fast reg memory region. */

/*
 * Atomic operation capability (masked versions are extended atomic
 * operations).
 */

#define PVRDMA_ATOMIC_OP_COMP_SWAP	BIT(0)	/* Compare and swap. */
#define PVRDMA_ATOMIC_OP_FETCH_ADD	BIT(1)	/* Fetch and add. */
#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP	BIT(2)	/* Masked compare and swap. */
#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD	BIT(3)	/* Masked fetch and add. */

/*
 * Base Memory Management Extension flags to support Fast Reg Memory Regions
 * and Fast Reg Work Requests. Each flag represents a verb operation and we
 * must support all of them to qualify for the BMME device cap.
 */

#define PVRDMA_BMME_FLAG_LOCAL_INV	BIT(0)	/* Local Invalidate. */
#define PVRDMA_BMME_FLAG_REMOTE_INV	BIT(1)	/* Remote Invalidate. */
#define PVRDMA_BMME_FLAG_FAST_REG_WR	BIT(2)	/* Fast Reg Work Request. */

/*
 * GID types. The interpretation of the gid_types bit field in the device
 * capabilities depends on the device mode. For now, the device only
 * supports the RoCE mode, so only the GID types for RoCE are defined.
 */

#define PVRDMA_GID_TYPE_FLAG_ROCE_V1	BIT(0)
#define PVRDMA_GID_TYPE_FLAG_ROCE_V2	BIT(1)

enum pvrdma_pci_resource {
	PVRDMA_PCI_RESOURCE_MSIX,	/* BAR0: MSI-X, MMIO. */
	PVRDMA_PCI_RESOURCE_REG,	/* BAR1: Registers, MMIO. */
	PVRDMA_PCI_RESOURCE_UAR,	/* BAR2: UAR pages, MMIO, 64-bit. */
	PVRDMA_PCI_RESOURCE_LAST,	/* Last. */
};

enum pvrdma_device_ctl {
	PVRDMA_DEVICE_CTL_ACTIVATE,	/* Activate device. */
	PVRDMA_DEVICE_CTL_QUIESCE,	/* Quiesce device. */
	PVRDMA_DEVICE_CTL_RESET,	/* Reset device. */
};

enum pvrdma_intr_vector {
	PVRDMA_INTR_VECTOR_RESPONSE,	/* Command response. */
	PVRDMA_INTR_VECTOR_ASYNC,	/* Async events. */
	PVRDMA_INTR_VECTOR_CQ,		/* CQ notification. */
	/* Additional CQ notification vectors. */
};

enum pvrdma_intr_cause {
	PVRDMA_INTR_CAUSE_RESPONSE	= (1 << PVRDMA_INTR_VECTOR_RESPONSE),
	PVRDMA_INTR_CAUSE_ASYNC		= (1 << PVRDMA_INTR_VECTOR_ASYNC),
	PVRDMA_INTR_CAUSE_CQ		= (1 << PVRDMA_INTR_VECTOR_CQ),
};
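
/*
 * Illustrative sketch only: an interrupt handler would read the interrupt
 * cause register and dispatch on the cause bits above. 'regs' is assumed
 * to be the mapped BAR1 space (readl() from <linux/io.h>); the case bodies
 * are placeholders, not driver code.
 */
static inline void pvrdma_example_dispatch_intr(void __iomem *regs)
{
	u32 icr = readl(regs + PVRDMA_REG_ICR);	/* R: interrupt cause */

	if (icr & PVRDMA_INTR_CAUSE_RESPONSE)
		;	/* wake the command-response waiter */
	if (icr & PVRDMA_INTR_CAUSE_ASYNC)
		;	/* drain the async event ring */
	if (icr & PVRDMA_INTR_CAUSE_CQ)
		;	/* drain the CQ notification ring */
}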

enum pvrdma_intr_type {
	PVRDMA_INTR_TYPE_INTX,		/* Legacy. */
	PVRDMA_INTR_TYPE_MSI,		/* MSI. */
	PVRDMA_INTR_TYPE_MSIX,		/* MSI-X. */
};

enum pvrdma_gos_bits {
	PVRDMA_GOS_BITS_UNK,		/* Unknown. */
	PVRDMA_GOS_BITS_32,		/* 32-bit. */
	PVRDMA_GOS_BITS_64,		/* 64-bit. */
};

enum pvrdma_gos_type {
	PVRDMA_GOS_TYPE_UNK,		/* Unknown. */
	PVRDMA_GOS_TYPE_LINUX,		/* Linux. */
};

enum pvrdma_device_mode {
	PVRDMA_DEVICE_MODE_ROCE,	/* RoCE. */
	PVRDMA_DEVICE_MODE_IWARP,	/* iWarp. */
	PVRDMA_DEVICE_MODE_IB,		/* InfiniBand. */
};

struct pvrdma_gos_info {
	u32 gos_bits:2;			/* W: PVRDMA_GOS_BITS_ */
	u32 gos_type:4;			/* W: PVRDMA_GOS_TYPE_ */
	u32 gos_ver:16;			/* W: Guest OS version. */
	u32 gos_misc:10;		/* W: Other. */
	u32 pad;			/* Pad to 8-byte alignment. */
};

struct pvrdma_device_caps {
	u64 fw_ver;				/* R: Query device. */
	__be64 node_guid;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u64 atomic_arg_sizes;			/* EX verbs. */
	u32 ex_comp_mask;			/* EX verbs. */
	u32 device_cap_flags2;			/* EX verbs. */
	u32 max_fa_bit_boundary;		/* EX verbs. */
	u32 log_max_atomic_inline_arg;		/* EX verbs. */
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u32 max_qp;
	u32 max_qp_wr;
	u32 device_cap_flags;
	u32 max_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u32 max_mr;
	u32 max_pd;
	u32 max_qp_rd_atom;
	u32 max_ee_rd_atom;
	u32 max_res_rd_atom;
	u32 max_qp_init_rd_atom;
	u32 max_ee_init_rd_atom;
	u32 max_ee;
	u32 max_rdd;
	u32 max_mw;
	u32 max_raw_ipv6_qp;
	u32 max_raw_ethy_qp;
	u32 max_mcast_grp;
	u32 max_mcast_qp_attach;
	u32 max_total_mcast_qp_attach;
	u32 max_ah;
	u32 max_fmr;
	u32 max_map_per_fmr;
	u32 max_srq;
	u32 max_srq_wr;
	u32 max_srq_sge;
	u32 max_uar;
	u32 gid_tbl_len;
	u16 max_pkeys;
	u8  local_ca_ack_delay;
	u8  phys_port_cnt;
	u8  mode;				/* PVRDMA_DEVICE_MODE_ */
	u8  atomic_ops;				/* PVRDMA_ATOMIC_OP_* bits */
	u8  bmme_flags;				/* FRWR Mem Mgmt Extensions */
	u8  gid_types;				/* PVRDMA_GID_TYPE_FLAG_ */
	u8  reserved[4];
};
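
/*
 * Illustrative sketch only: the capability byte fields above are tested
 * against the flag and mode definitions earlier in this file. The helper
 * name and the particular checks are hypothetical.
 */
static inline bool pvrdma_example_caps_supported(const struct pvrdma_device_caps *caps)
{
	/* Only RoCE mode is defined for now; require at least RoCE v1 GIDs. */
	return caps->mode == PVRDMA_DEVICE_MODE_ROCE &&
	       (caps->gid_types & PVRDMA_GID_TYPE_FLAG_ROCE_V1);
}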

struct pvrdma_ring_page_info {
	u32 num_pages;				/* Num pages incl. header. */
	u32 reserved;				/* Reserved. */
	u64 pdir_dma;				/* Page directory PA. */
};

#pragma pack(push, 1)

struct pvrdma_device_shared_region {
	u32 driver_version;			/* W: Driver version. */
	u32 pad;				/* Pad to 8-byte align. */
	struct pvrdma_gos_info gos_info;	/* W: Guest OS information. */
	u64 cmd_slot_dma;			/* W: Command slot address. */
	u64 resp_slot_dma;			/* W: Response slot address. */
	struct pvrdma_ring_page_info async_ring_pages;
						/* W: Async ring page info. */
	struct pvrdma_ring_page_info cq_ring_pages;
						/* W: CQ ring page info. */
	u32 uar_pfn;				/* W: UAR pageframe. */
	u32 pad2;				/* Pad to 8-byte align. */
	struct pvrdma_device_caps caps;		/* R: Device capabilities. */
};

#pragma pack(pop)
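
/*
 * Illustrative sketch only: populating the shared region above and handing
 * its physical address to the device. Assumes 'dsr' has already been
 * allocated with DMA address 'dsr_dma', 'regs' is the mapped BAR1 space,
 * and writel() from <linux/io.h> is available; not driver code.
 */
static inline void pvrdma_example_init_dsr(struct pvrdma_device_shared_region *dsr,
					   u64 dsr_dma, void __iomem *regs)
{
	dsr->driver_version = PVRDMA_VERSION;
	dsr->gos_info.gos_bits = PVRDMA_GOS_BITS_64;
	dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;

	/* Write the shared region PA, low half then high half. */
	writel((u32)dsr_dma, regs + PVRDMA_REG_DSRLOW);
	writel((u32)(dsr_dma >> 32), regs + PVRDMA_REG_DSRHIGH);
}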

/* Event types. Currently a 1:1 mapping with enum ib_event. */
enum pvrdma_eqe_type {
	PVRDMA_EVENT_CQ_ERR,
	PVRDMA_EVENT_QP_FATAL,
	PVRDMA_EVENT_QP_REQ_ERR,
	PVRDMA_EVENT_QP_ACCESS_ERR,
	PVRDMA_EVENT_COMM_EST,
	PVRDMA_EVENT_SQ_DRAINED,
	PVRDMA_EVENT_PATH_MIG,
	PVRDMA_EVENT_PATH_MIG_ERR,
	PVRDMA_EVENT_DEVICE_FATAL,
	PVRDMA_EVENT_PORT_ACTIVE,
	PVRDMA_EVENT_PORT_ERR,
	PVRDMA_EVENT_LID_CHANGE,
	PVRDMA_EVENT_PKEY_CHANGE,
	PVRDMA_EVENT_SM_CHANGE,
	PVRDMA_EVENT_SRQ_ERR,
	PVRDMA_EVENT_SRQ_LIMIT_REACHED,
	PVRDMA_EVENT_QP_LAST_WQE_REACHED,
	PVRDMA_EVENT_CLIENT_REREGISTER,
	PVRDMA_EVENT_GID_CHANGE,
};

/* Event queue element. */
struct pvrdma_eqe {
	u32 type;	/* Event type. */
	u32 info;	/* Handle, other. */
};
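
/*
 * Illustrative sketch only: consuming one event queue element. Per the
 * comments above, 'info' carries the affected handle (or port number) and
 * 'type' maps 1:1 onto enum ib_event; the case bodies are placeholders.
 */
static inline void pvrdma_example_handle_eqe(const struct pvrdma_eqe *eqe)
{
	switch (eqe->type) {
	case PVRDMA_EVENT_PORT_ACTIVE:
	case PVRDMA_EVENT_PORT_ERR:
		;	/* port-level event */
		break;
	case PVRDMA_EVENT_CQ_ERR:
		;	/* eqe->info identifies the CQ */
		break;
	default:
		;	/* remaining types follow enum ib_event */
		break;
	}
}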

/* CQ notification queue element. */
struct pvrdma_cqne {
	u32 info;	/* Handle */
};

enum {
	PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PKEY,
	PVRDMA_CMD_CREATE_PD,
	PVRDMA_CMD_DESTROY_PD,
	PVRDMA_CMD_CREATE_MR,
	PVRDMA_CMD_DESTROY_MR,
	PVRDMA_CMD_CREATE_CQ,
	PVRDMA_CMD_RESIZE_CQ,
	PVRDMA_CMD_DESTROY_CQ,
	PVRDMA_CMD_CREATE_QP,
	PVRDMA_CMD_MODIFY_QP,
	PVRDMA_CMD_QUERY_QP,
	PVRDMA_CMD_DESTROY_QP,
	PVRDMA_CMD_CREATE_UC,
	PVRDMA_CMD_DESTROY_UC,
	PVRDMA_CMD_CREATE_BIND,
	PVRDMA_CMD_DESTROY_BIND,
	PVRDMA_CMD_MAX,
};

enum {
	PVRDMA_CMD_FIRST_RESP = (1 << 31),
	PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
	PVRDMA_CMD_QUERY_PKEY_RESP,
	PVRDMA_CMD_CREATE_PD_RESP,
	PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
	PVRDMA_CMD_CREATE_MR_RESP,
	PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
	PVRDMA_CMD_CREATE_CQ_RESP,
	PVRDMA_CMD_RESIZE_CQ_RESP,
	PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
	PVRDMA_CMD_CREATE_QP_RESP,
	PVRDMA_CMD_MODIFY_QP_RESP,
	PVRDMA_CMD_QUERY_QP_RESP,
	PVRDMA_CMD_DESTROY_QP_RESP,
	PVRDMA_CMD_CREATE_UC_RESP,
	PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
	PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
	PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
	PVRDMA_CMD_MAX_RESP,
};
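
/*
 * Illustrative note: the two enums above enumerate in the same order, so
 * the response code expected for a request is the request code with bit 31
 * set. A hypothetical helper expressing that relationship:
 */
static inline u32 pvrdma_example_resp_code(u32 cmd)
{
	return cmd | PVRDMA_CMD_FIRST_RESP;	/* e.g. CREATE_CQ -> CREATE_CQ_RESP */
}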

struct pvrdma_cmd_hdr {
	u64 response;		/* Key for response lookup. */
	u32 cmd;		/* PVRDMA_CMD_ */
	u32 reserved;		/* Reserved. */
};

struct pvrdma_cmd_resp_hdr {
	u64 response;		/* From cmd hdr. */
	u32 ack;		/* PVRDMA_CMD_XXX_RESP */
	u8 err;			/* Error. */
	u8 reserved[3];		/* Reserved. */
};
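
/*
 * Illustrative sketch only: a request's 'response' key is echoed back in
 * the response header, 'ack' names the PVRDMA_CMD_*_RESP code and 'err'
 * reports failure. A hypothetical consistency check, reusing the
 * pvrdma_example_resp_code() sketch above:
 */
static inline bool pvrdma_example_resp_matches(const struct pvrdma_cmd_hdr *req,
					       const struct pvrdma_cmd_resp_hdr *resp)
{
	return resp->response == req->response &&
	       resp->ack == pvrdma_example_resp_code(req->cmd) &&
	       resp->err == 0;
}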

struct pvrdma_cmd_query_port {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 reserved[7];
};

struct pvrdma_cmd_query_port_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_port_attr attrs;
};

struct pvrdma_cmd_query_pkey {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 index;
	u8 reserved[6];
};

struct pvrdma_cmd_query_pkey_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u16 pkey;
	u8 reserved[6];
};

struct pvrdma_cmd_create_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 pfn; /* UAR page frame number */
	u8 reserved[4];
};

struct pvrdma_cmd_create_uc_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_mr {
	struct pvrdma_cmd_hdr hdr;
	u64 start;
	u64 length;
	u64 pdir_dma;
	u32 pd_handle;
	u32 access_flags;
	u32 flags;
	u32 nchunks;
};

struct pvrdma_cmd_create_mr_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 mr_handle;
	u32 lkey;
	u32 rkey;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_mr {
	struct pvrdma_cmd_hdr hdr;
	u32 mr_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 ctx_handle;
	u32 cqe;
	u32 nchunks;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cqe;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_qp {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 pd_handle;
	u32 send_cq_handle;
	u32 recv_cq_handle;
	u32 srq_handle;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
	u32 lkey;
	u32 access_flags;
	u16 total_chunks;
	u16 send_chunks;
	u16 max_atomic_arg;
	u8 sq_sig_all;
	u8 qp_type;
	u8 is_srq;
	u8 reserved[3];
};

struct pvrdma_cmd_create_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 qpn;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
};

struct pvrdma_cmd_modify_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_query_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
};

struct pvrdma_cmd_query_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_destroy_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 events_reported;
	u8 reserved[4];
};

struct pvrdma_cmd_create_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 mtu;
	u32 vlan;
	u32 index;
	u8 new_gid[16];
	u8 gid_type;
	u8 reserved[3];
};

struct pvrdma_cmd_destroy_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 index;
	u8 dest_gid[16];
	u8 reserved[4];
};

union pvrdma_cmd_req {
	struct pvrdma_cmd_hdr hdr;
	struct pvrdma_cmd_query_port query_port;
	struct pvrdma_cmd_query_pkey query_pkey;
	struct pvrdma_cmd_create_uc create_uc;
	struct pvrdma_cmd_destroy_uc destroy_uc;
	struct pvrdma_cmd_create_pd create_pd;
	struct pvrdma_cmd_destroy_pd destroy_pd;
	struct pvrdma_cmd_create_mr create_mr;
	struct pvrdma_cmd_destroy_mr destroy_mr;
	struct pvrdma_cmd_create_cq create_cq;
	struct pvrdma_cmd_resize_cq resize_cq;
	struct pvrdma_cmd_destroy_cq destroy_cq;
	struct pvrdma_cmd_create_qp create_qp;
	struct pvrdma_cmd_modify_qp modify_qp;
	struct pvrdma_cmd_query_qp query_qp;
	struct pvrdma_cmd_destroy_qp destroy_qp;
	struct pvrdma_cmd_create_bind create_bind;
	struct pvrdma_cmd_destroy_bind destroy_bind;
};

union pvrdma_cmd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_cmd_query_port_resp query_port_resp;
	struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
	struct pvrdma_cmd_create_uc_resp create_uc_resp;
	struct pvrdma_cmd_create_pd_resp create_pd_resp;
	struct pvrdma_cmd_create_mr_resp create_mr_resp;
	struct pvrdma_cmd_create_cq_resp create_cq_resp;
	struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
	struct pvrdma_cmd_create_qp_resp create_qp_resp;
	struct pvrdma_cmd_query_qp_resp query_qp_resp;
	struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
};
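
/*
 * Illustrative sketch only: requests are built in a union pvrdma_cmd_req
 * slot and the matching *_resp structure is read back from the response
 * slot. Assumes memset() from <linux/string.h>; the posting mechanism is
 * outside this header, so it is only hinted at in a comment.
 */
static inline void pvrdma_example_build_create_pd(union pvrdma_cmd_req *req,
						  u32 ctx_handle)
{
	struct pvrdma_cmd_create_pd *cmd = &req->create_pd;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
	cmd->ctx_handle = ctx_handle;
	/* ...post the request, then read create_pd_resp.pd_handle on success. */
}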

#endif /* __PVRDMA_DEV_API_H__ */