xref: /openbmc/linux/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h (revision c95baf12f5077419db01313ab61c2aac007d40cd)
/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_DEV_API_H__
#define __PVRDMA_DEV_API_H__

#include <linux/types.h>

#include "pvrdma_verbs.h"

/*
 * PVRDMA version macros. Some new features require updates to PVRDMA_VERSION.
 * These macros allow us to check for different features if necessary.
 */

#define PVRDMA_ROCEV1_VERSION		17
#define PVRDMA_ROCEV2_VERSION		18
#define PVRDMA_PPN64_VERSION		19
#define PVRDMA_QPHANDLE_VERSION		20
#define PVRDMA_VERSION			PVRDMA_QPHANDLE_VERSION

#define PVRDMA_BOARD_ID			1
#define PVRDMA_REV_ID			1

/*
 * Masks and accessors for the page directory, which is a two-level lookup:
 * page directory -> page table -> page. Only one directory entry is used for
 * now, but that could easily be expanded. 9 bits for tables and 9 bits for
 * pages give one gigabyte for memory regions and so forth.
 */

#define PVRDMA_PDIR_SHIFT		18
#define PVRDMA_PTABLE_SHIFT		9
#define PVRDMA_PAGE_DIR_DIR(x)		(((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
#define PVRDMA_PAGE_DIR_TABLE(x)	(((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
#define PVRDMA_PAGE_DIR_PAGE(x)		((x) & 0x1ff)
#define PVRDMA_PAGE_DIR_MAX_PAGES	(1 * 512 * 512)
#define PVRDMA_MAX_FAST_REG_PAGES	128
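
/*
 * Illustrative sketch (not part of the device API): how a flat page index
 * decomposes with the accessors above; "idx" is a hypothetical index below
 * PVRDMA_PAGE_DIR_MAX_PAGES.
 *
 *	u32 dir   = PVRDMA_PAGE_DIR_DIR(idx);	  (bit 18; 0 while only one
 *						   directory entry is used)
 *	u32 table = PVRDMA_PAGE_DIR_TABLE(idx);	  (bits 17:9)
 *	u32 page  = PVRDMA_PAGE_DIR_PAGE(idx);	  (bits 8:0)
 */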

/*
 * Max MSI-X vectors.
 */

#define PVRDMA_MAX_INTERRUPTS	3

/* Register offsets within PCI resource on BAR1. */
#define PVRDMA_REG_VERSION	0x00	/* R: Version of device. */
#define PVRDMA_REG_DSRLOW	0x04	/* W: Device shared region low PA. */
#define PVRDMA_REG_DSRHIGH	0x08	/* W: Device shared region high PA. */
#define PVRDMA_REG_CTL		0x0c	/* W: PVRDMA_DEVICE_CTL */
#define PVRDMA_REG_REQUEST	0x10	/* W: Indicate device request. */
#define PVRDMA_REG_ERR		0x14	/* R: Device error. */
#define PVRDMA_REG_ICR		0x18	/* R: Interrupt cause. */
#define PVRDMA_REG_IMR		0x1c	/* R/W: Interrupt mask. */
#define PVRDMA_REG_MACL		0x20	/* R/W: MAC address low. */
#define PVRDMA_REG_MACH		0x24	/* R/W: MAC address high. */
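
/*
 * Illustrative sketch (not taken from the driver): publishing the device
 * shared region by splitting its 64-bit physical address across the two DSR
 * registers, then activating the device through PVRDMA_REG_CTL (see enum
 * pvrdma_device_ctl below). "regs" is a hypothetical ioremap()ed mapping of
 * BAR1 and "dsr_dma" the DMA address of the shared region.
 *
 *	writel((u32)dsr_dma, regs + PVRDMA_REG_DSRLOW);
 *	writel((u32)(dsr_dma >> 32), regs + PVRDMA_REG_DSRHIGH);
 *	writel(PVRDMA_DEVICE_CTL_ACTIVATE, regs + PVRDMA_REG_CTL);
 */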

/* Object flags. */
#define PVRDMA_CQ_FLAG_ARMED_SOL	BIT(0)	/* Armed for solicited-only. */
#define PVRDMA_CQ_FLAG_ARMED		BIT(1)	/* Armed. */
#define PVRDMA_MR_FLAG_DMA		BIT(0)	/* DMA region. */
#define PVRDMA_MR_FLAG_FRMR		BIT(1)	/* Fast reg memory region. */

/*
 * Atomic operation capability (masked versions are extended atomic
 * operations).
 */

#define PVRDMA_ATOMIC_OP_COMP_SWAP	BIT(0)	/* Compare and swap. */
#define PVRDMA_ATOMIC_OP_FETCH_ADD	BIT(1)	/* Fetch and add. */
#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP	BIT(2)	/* Masked compare and swap. */
#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD	BIT(3)	/* Masked fetch and add. */
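
/*
 * Illustrative sketch (not part of the device API): testing the reported
 * atomic capability. "caps" is a hypothetical struct pvrdma_device_caps
 * (defined later in this file) read from the device shared region.
 *
 *	if (caps.atomic_ops & (PVRDMA_ATOMIC_OP_COMP_SWAP |
 *			       PVRDMA_ATOMIC_OP_FETCH_ADD))
 *		... advertise basic atomic support ...
 */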

/*
 * Base Memory Management Extension flags to support Fast Reg Memory Regions
 * and Fast Reg Work Requests. Each flag represents a verb operation and we
 * must support all of them to qualify for the BMME device cap.
 */

#define PVRDMA_BMME_FLAG_LOCAL_INV	BIT(0)	/* Local Invalidate. */
#define PVRDMA_BMME_FLAG_REMOTE_INV	BIT(1)	/* Remote Invalidate. */
#define PVRDMA_BMME_FLAG_FAST_REG_WR	BIT(2)	/* Fast Reg Work Request. */
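
/*
 * Illustrative sketch (not part of the device API): since all three verb
 * operations are required to qualify for the BMME device cap, a driver
 * would check that every flag is set. "bmme_flags" refers to the field of
 * the same name in struct pvrdma_device_caps below.
 *
 *	const u8 bmme_all = PVRDMA_BMME_FLAG_LOCAL_INV |
 *			    PVRDMA_BMME_FLAG_REMOTE_INV |
 *			    PVRDMA_BMME_FLAG_FAST_REG_WR;
 *
 *	if ((bmme_flags & bmme_all) == bmme_all)
 *		... advertise memory management extensions ...
 */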

/*
 * GID types. The interpretation of the gid_types bit field in the device
 * capabilities will depend on the device mode. For now, the device only
 * supports RoCE mode, so only the GID types for RoCE are defined.
 */

#define PVRDMA_GID_TYPE_FLAG_ROCE_V1	BIT(0)
#define PVRDMA_GID_TYPE_FLAG_ROCE_V2	BIT(1)

/*
 * Version checks. These macros check whether a given device version supports
 * specific device capabilities.
 */

#define PVRDMA_IS_VERSION17(_dev)					\
	(_dev->dsr_version == PVRDMA_ROCEV1_VERSION &&			\
	 _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)

#define PVRDMA_IS_VERSION18(_dev)					\
	(_dev->dsr_version >= PVRDMA_ROCEV2_VERSION &&			\
	 (_dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1 ||  \
	  _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2))

#define PVRDMA_SUPPORTED(_dev)						\
	((_dev->dsr->caps.mode == PVRDMA_DEVICE_MODE_ROCE) &&		\
	 (PVRDMA_IS_VERSION17(_dev) || PVRDMA_IS_VERSION18(_dev)))

/*
 * Get capability values based on device version.
 */

#define PVRDMA_GET_CAP(_dev, _old_val, _val) \
	((PVRDMA_IS_VERSION18(_dev)) ? _val : _old_val)
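
/*
 * Illustrative sketch (not taken from the driver): typical use of the
 * version helpers during probe. "_dev" has the shape assumed by the macros
 * above (a dsr_version field and a dsr->caps structure); "legacy_max_sge"
 * is a hypothetical pre-RoCEv2 fallback value.
 *
 *	if (!PVRDMA_SUPPORTED(_dev))
 *		... fail the probe: unusable mode/version combination ...
 *
 *	max_sge = PVRDMA_GET_CAP(_dev, legacy_max_sge,
 *				 _dev->dsr->caps.max_sge);
 */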

enum pvrdma_pci_resource {
	PVRDMA_PCI_RESOURCE_MSIX,	/* BAR0: MSI-X, MMIO. */
	PVRDMA_PCI_RESOURCE_REG,	/* BAR1: Registers, MMIO. */
	PVRDMA_PCI_RESOURCE_UAR,	/* BAR2: UAR pages, MMIO, 64-bit. */
	PVRDMA_PCI_RESOURCE_LAST,	/* Last. */
};

enum pvrdma_device_ctl {
	PVRDMA_DEVICE_CTL_ACTIVATE,	/* Activate device. */
	PVRDMA_DEVICE_CTL_UNQUIESCE,	/* Unquiesce device. */
	PVRDMA_DEVICE_CTL_RESET,	/* Reset device. */
};

enum pvrdma_intr_vector {
	PVRDMA_INTR_VECTOR_RESPONSE,	/* Command response. */
	PVRDMA_INTR_VECTOR_ASYNC,	/* Async events. */
	PVRDMA_INTR_VECTOR_CQ,		/* CQ notification. */
	/* Additional CQ notification vectors. */
};

enum pvrdma_intr_cause {
	PVRDMA_INTR_CAUSE_RESPONSE	= (1 << PVRDMA_INTR_VECTOR_RESPONSE),
	PVRDMA_INTR_CAUSE_ASYNC		= (1 << PVRDMA_INTR_VECTOR_ASYNC),
	PVRDMA_INTR_CAUSE_CQ		= (1 << PVRDMA_INTR_VECTOR_CQ),
};
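
/*
 * Illustrative sketch (not taken from the driver): a handler could read the
 * interrupt cause register and dispatch on the cause bits above. "regs" is a
 * hypothetical ioremap()ed mapping of BAR1.
 *
 *	u32 icr = readl(regs + PVRDMA_REG_ICR);
 *
 *	if (icr & PVRDMA_INTR_CAUSE_RESPONSE)
 *		... complete the pending command ...
 *	if (icr & PVRDMA_INTR_CAUSE_ASYNC)
 *		... drain the async event ring ...
 *	if (icr & PVRDMA_INTR_CAUSE_CQ)
 *		... drain the CQ notification ring(s) ...
 */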

enum pvrdma_gos_bits {
	PVRDMA_GOS_BITS_UNK,		/* Unknown. */
	PVRDMA_GOS_BITS_32,		/* 32-bit. */
	PVRDMA_GOS_BITS_64,		/* 64-bit. */
};

enum pvrdma_gos_type {
	PVRDMA_GOS_TYPE_UNK,		/* Unknown. */
	PVRDMA_GOS_TYPE_LINUX,		/* Linux. */
};

enum pvrdma_device_mode {
	PVRDMA_DEVICE_MODE_ROCE,	/* RoCE. */
	PVRDMA_DEVICE_MODE_IWARP,	/* iWarp. */
	PVRDMA_DEVICE_MODE_IB,		/* InfiniBand. */
};

struct pvrdma_gos_info {
	u32 gos_bits:2;			/* W: PVRDMA_GOS_BITS_ */
	u32 gos_type:4;			/* W: PVRDMA_GOS_TYPE_ */
	u32 gos_ver:16;			/* W: Guest OS version. */
	u32 gos_misc:10;		/* W: Other. */
	u32 pad;			/* Pad to 8-byte alignment. */
};

struct pvrdma_device_caps {
	u64 fw_ver;				/* R: Query device. */
	__be64 node_guid;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u64 atomic_arg_sizes;			/* EX verbs. */
	u32 ex_comp_mask;			/* EX verbs. */
	u32 device_cap_flags2;			/* EX verbs. */
	u32 max_fa_bit_boundary;		/* EX verbs. */
	u32 log_max_atomic_inline_arg;		/* EX verbs. */
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u32 max_qp;
	u32 max_qp_wr;
	u32 device_cap_flags;
	u32 max_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u32 max_mr;
	u32 max_pd;
	u32 max_qp_rd_atom;
	u32 max_ee_rd_atom;
	u32 max_res_rd_atom;
	u32 max_qp_init_rd_atom;
	u32 max_ee_init_rd_atom;
	u32 max_ee;
	u32 max_rdd;
	u32 max_mw;
	u32 max_raw_ipv6_qp;
	u32 max_raw_ethy_qp;
	u32 max_mcast_grp;
	u32 max_mcast_qp_attach;
	u32 max_total_mcast_qp_attach;
	u32 max_ah;
	u32 max_fmr;
	u32 max_map_per_fmr;
	u32 max_srq;
	u32 max_srq_wr;
	u32 max_srq_sge;
	u32 max_uar;
	u32 gid_tbl_len;
	u16 max_pkeys;
	u8  local_ca_ack_delay;
	u8  phys_port_cnt;
	u8  mode;				/* PVRDMA_DEVICE_MODE_ */
	u8  atomic_ops;				/* PVRDMA_ATOMIC_OP_* bits */
	u8  bmme_flags;				/* FRWR Mem Mgmt Extensions */
	u8  gid_types;				/* PVRDMA_GID_TYPE_FLAG_ */
	u32 max_fast_reg_page_list_len;
};

struct pvrdma_ring_page_info {
	u32 num_pages;				/* Num pages incl. header. */
	u32 reserved;				/* Reserved. */
	u64 pdir_dma;				/* Page directory PA. */
};

#pragma pack(push, 1)

struct pvrdma_device_shared_region {
	u32 driver_version;			/* W: Driver version. */
	u32 pad;				/* Pad to 8-byte align. */
	struct pvrdma_gos_info gos_info;	/* W: Guest OS information. */
	u64 cmd_slot_dma;			/* W: Command slot address. */
	u64 resp_slot_dma;			/* W: Response slot address. */
	struct pvrdma_ring_page_info async_ring_pages;
						/* W: Async ring page info. */
	struct pvrdma_ring_page_info cq_ring_pages;
						/* W: CQ ring page info. */
	union {
		u32 uar_pfn;			/* W: UAR pageframe. */
		u64 uar_pfn64;			/* W: 64-bit UAR page frame. */
	};
	struct pvrdma_device_caps caps;		/* R: Device capabilities. */
};

#pragma pack(pop)
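
/*
 * Illustrative sketch (not taken from the driver): how a guest driver might
 * fill the writable ("W:") fields of the shared region before publishing its
 * address through PVRDMA_REG_DSRLOW/PVRDMA_REG_DSRHIGH. "dsr" is a
 * hypothetical coherent DMA allocation of struct pvrdma_device_shared_region.
 *
 *	dsr->driver_version = PVRDMA_VERSION;
 *	dsr->gos_info.gos_bits = PVRDMA_GOS_BITS_64;
 *	dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
 *	dsr->cmd_slot_dma = cmd_slot_dma;
 *	dsr->resp_slot_dma = resp_slot_dma;
 *	dsr->uar_pfn64 = uar_start >> PAGE_SHIFT;  (for PPN64-aware devices)
 *
 * The device reports the read-only ("R:") capabilities back in dsr->caps.
 */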

/* Event types. Currently a 1:1 mapping with enum ib_event. */
enum pvrdma_eqe_type {
	PVRDMA_EVENT_CQ_ERR,
	PVRDMA_EVENT_QP_FATAL,
	PVRDMA_EVENT_QP_REQ_ERR,
	PVRDMA_EVENT_QP_ACCESS_ERR,
	PVRDMA_EVENT_COMM_EST,
	PVRDMA_EVENT_SQ_DRAINED,
	PVRDMA_EVENT_PATH_MIG,
	PVRDMA_EVENT_PATH_MIG_ERR,
	PVRDMA_EVENT_DEVICE_FATAL,
	PVRDMA_EVENT_PORT_ACTIVE,
	PVRDMA_EVENT_PORT_ERR,
	PVRDMA_EVENT_LID_CHANGE,
	PVRDMA_EVENT_PKEY_CHANGE,
	PVRDMA_EVENT_SM_CHANGE,
	PVRDMA_EVENT_SRQ_ERR,
	PVRDMA_EVENT_SRQ_LIMIT_REACHED,
	PVRDMA_EVENT_QP_LAST_WQE_REACHED,
	PVRDMA_EVENT_CLIENT_REREGISTER,
	PVRDMA_EVENT_GID_CHANGE,
};

/* Event queue element. */
struct pvrdma_eqe {
	u32 type;	/* Event type. */
	u32 info;	/* Handle, other. */
};

/* CQ notification queue element. */
struct pvrdma_cqne {
	u32 info;	/* Handle */
};

enum {
	PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PKEY,
	PVRDMA_CMD_CREATE_PD,
	PVRDMA_CMD_DESTROY_PD,
	PVRDMA_CMD_CREATE_MR,
	PVRDMA_CMD_DESTROY_MR,
	PVRDMA_CMD_CREATE_CQ,
	PVRDMA_CMD_RESIZE_CQ,
	PVRDMA_CMD_DESTROY_CQ,
	PVRDMA_CMD_CREATE_QP,
	PVRDMA_CMD_MODIFY_QP,
	PVRDMA_CMD_QUERY_QP,
	PVRDMA_CMD_DESTROY_QP,
	PVRDMA_CMD_CREATE_UC,
	PVRDMA_CMD_DESTROY_UC,
	PVRDMA_CMD_CREATE_BIND,
	PVRDMA_CMD_DESTROY_BIND,
	PVRDMA_CMD_CREATE_SRQ,
	PVRDMA_CMD_MODIFY_SRQ,
	PVRDMA_CMD_QUERY_SRQ,
	PVRDMA_CMD_DESTROY_SRQ,
	PVRDMA_CMD_MAX,
};

enum {
	PVRDMA_CMD_FIRST_RESP = (1 << 31),
	PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
	PVRDMA_CMD_QUERY_PKEY_RESP,
	PVRDMA_CMD_CREATE_PD_RESP,
	PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
	PVRDMA_CMD_CREATE_MR_RESP,
	PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
	PVRDMA_CMD_CREATE_CQ_RESP,
	PVRDMA_CMD_RESIZE_CQ_RESP,
	PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
	PVRDMA_CMD_CREATE_QP_RESP,
	PVRDMA_CMD_MODIFY_QP_RESP,
	PVRDMA_CMD_QUERY_QP_RESP,
	PVRDMA_CMD_DESTROY_QP_RESP,
	PVRDMA_CMD_CREATE_UC_RESP,
	PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
	PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
	PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
	PVRDMA_CMD_CREATE_SRQ_RESP,
	PVRDMA_CMD_MODIFY_SRQ_RESP,
	PVRDMA_CMD_QUERY_SRQ_RESP,
	PVRDMA_CMD_DESTROY_SRQ_RESP,
	PVRDMA_CMD_MAX_RESP,
};
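
/*
 * Illustrative note: the response codes above mirror the request codes entry
 * for entry, so the acknowledgement expected for a command is simply the
 * command value with the PVRDMA_CMD_FIRST_RESP bit set, e.g.
 *
 *	PVRDMA_CMD_CREATE_PD_RESP == (PVRDMA_CMD_CREATE_PD |
 *				      PVRDMA_CMD_FIRST_RESP)
 */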

struct pvrdma_cmd_hdr {
	u64 response;		/* Key for response lookup. */
	u32 cmd;		/* PVRDMA_CMD_ */
	u32 reserved;		/* Reserved. */
};

struct pvrdma_cmd_resp_hdr {
	u64 response;		/* From cmd hdr. */
	u32 ack;		/* PVRDMA_CMD_XXX_RESP */
	u8 err;			/* Error. */
	u8 reserved[3];		/* Reserved. */
};
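
/*
 * Illustrative sketch (not taken from the driver): the command flow implied
 * by the two headers above. The guest writes a request into the command slot
 * published via cmd_slot_dma, notifies the device through PVRDMA_REG_REQUEST,
 * waits for the command-response interrupt, and reads the reply from the
 * response slot. The "response" key is echoed back by the device so the reply
 * can be matched to its request; "ack" carries the PVRDMA_CMD_*_RESP code and
 * "err" the command status. "req", "rsp" and "unique_key" are hypothetical.
 *
 *	req.hdr.cmd = PVRDMA_CMD_QUERY_PORT;
 *	req.hdr.response = unique_key;
 *	... copy req into the command slot, kick PVRDMA_REG_REQUEST,
 *	    wait for PVRDMA_INTR_CAUSE_RESPONSE, copy out the response slot ...
 *	if (rsp.hdr.response == unique_key && !rsp.hdr.err)
 *		... rsp.hdr.ack should be PVRDMA_CMD_QUERY_PORT_RESP ...
 */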

struct pvrdma_cmd_query_port {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 reserved[7];
};

struct pvrdma_cmd_query_port_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_port_attr attrs;
};

struct pvrdma_cmd_query_pkey {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 index;
	u8 reserved[6];
};

struct pvrdma_cmd_query_pkey_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u16 pkey;
	u8 reserved[6];
};

struct pvrdma_cmd_create_uc {
	struct pvrdma_cmd_hdr hdr;
	union {
		u32 pfn; /* UAR page frame number */
		u64 pfn64; /* 64-bit UAR page frame number */
	};
};

struct pvrdma_cmd_create_uc_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_mr {
	struct pvrdma_cmd_hdr hdr;
	u64 start;
	u64 length;
	u64 pdir_dma;
	u32 pd_handle;
	u32 access_flags;
	u32 flags;
	u32 nchunks;
};

struct pvrdma_cmd_create_mr_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 mr_handle;
	u32 lkey;
	u32 rkey;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_mr {
	struct pvrdma_cmd_hdr hdr;
	u32 mr_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 ctx_handle;
	u32 cqe;
	u32 nchunks;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cqe;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_srq {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 pd_handle;
	u32 nchunks;
	struct pvrdma_srq_attr attrs;
	u8 srq_type;
	u8 reserved[7];
};

struct pvrdma_cmd_create_srq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 srqn;
	u8 reserved[4];
};

struct pvrdma_cmd_modify_srq {
	struct pvrdma_cmd_hdr hdr;
	u32 srq_handle;
	u32 attr_mask;
	struct pvrdma_srq_attr attrs;
};

struct pvrdma_cmd_query_srq {
	struct pvrdma_cmd_hdr hdr;
	u32 srq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_query_srq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_srq_attr attrs;
};

struct pvrdma_cmd_destroy_srq {
	struct pvrdma_cmd_hdr hdr;
	u32 srq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_qp {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 pd_handle;
	u32 send_cq_handle;
	u32 recv_cq_handle;
	u32 srq_handle;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
	u32 lkey;
	u32 access_flags;
	u16 total_chunks;
	u16 send_chunks;
	u16 max_atomic_arg;
	u8 sq_sig_all;
	u8 qp_type;
	u8 is_srq;
	u8 reserved[3];
};

struct pvrdma_cmd_create_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 qpn;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
};

struct pvrdma_cmd_create_qp_resp_v2 {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 qpn;
	u32 qp_handle;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
};

struct pvrdma_cmd_modify_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_query_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
};

struct pvrdma_cmd_query_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_destroy_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 events_reported;
	u8 reserved[4];
};

struct pvrdma_cmd_create_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 mtu;
	u32 vlan;
	u32 index;
	u8 new_gid[16];
	u8 gid_type;
	u8 reserved[3];
};

struct pvrdma_cmd_destroy_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 index;
	u8 dest_gid[16];
	u8 reserved[4];
};

union pvrdma_cmd_req {
	struct pvrdma_cmd_hdr hdr;
	struct pvrdma_cmd_query_port query_port;
	struct pvrdma_cmd_query_pkey query_pkey;
	struct pvrdma_cmd_create_uc create_uc;
	struct pvrdma_cmd_destroy_uc destroy_uc;
	struct pvrdma_cmd_create_pd create_pd;
	struct pvrdma_cmd_destroy_pd destroy_pd;
	struct pvrdma_cmd_create_mr create_mr;
	struct pvrdma_cmd_destroy_mr destroy_mr;
	struct pvrdma_cmd_create_cq create_cq;
	struct pvrdma_cmd_resize_cq resize_cq;
	struct pvrdma_cmd_destroy_cq destroy_cq;
	struct pvrdma_cmd_create_qp create_qp;
	struct pvrdma_cmd_modify_qp modify_qp;
	struct pvrdma_cmd_query_qp query_qp;
	struct pvrdma_cmd_destroy_qp destroy_qp;
	struct pvrdma_cmd_create_bind create_bind;
	struct pvrdma_cmd_destroy_bind destroy_bind;
	struct pvrdma_cmd_create_srq create_srq;
	struct pvrdma_cmd_modify_srq modify_srq;
	struct pvrdma_cmd_query_srq query_srq;
	struct pvrdma_cmd_destroy_srq destroy_srq;
};

union pvrdma_cmd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_cmd_query_port_resp query_port_resp;
	struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
	struct pvrdma_cmd_create_uc_resp create_uc_resp;
	struct pvrdma_cmd_create_pd_resp create_pd_resp;
	struct pvrdma_cmd_create_mr_resp create_mr_resp;
	struct pvrdma_cmd_create_cq_resp create_cq_resp;
	struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
	struct pvrdma_cmd_create_qp_resp create_qp_resp;
	struct pvrdma_cmd_create_qp_resp_v2 create_qp_resp_v2;
	struct pvrdma_cmd_query_qp_resp query_qp_resp;
	struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
	struct pvrdma_cmd_create_srq_resp create_srq_resp;
	struct pvrdma_cmd_query_srq_resp query_srq_resp;
};
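
/*
 * Illustrative sketch (not taken from the driver): the request and response
 * unions above are what travel through the command and response slots, so a
 * single slot of each size covers every command. For example, a hypothetical
 * pkey query could be built as:
 *
 *	union pvrdma_cmd_req req = {};
 *	union pvrdma_cmd_resp rsp;
 *
 *	req.query_pkey.hdr.cmd = PVRDMA_CMD_QUERY_PKEY;
 *	req.query_pkey.port_num = 1;
 *	req.query_pkey.index = 0;
 *	... post &req, wait, then copy the reply into &rsp ...
 *	if (rsp.hdr.ack == PVRDMA_CMD_QUERY_PKEY_RESP && !rsp.hdr.err)
 *		pkey = rsp.query_pkey_resp.pkey;
 */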

#endif /* __PVRDMA_DEV_API_H__ */