/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef _EFA_ADMIN_CMDS_H_
#define _EFA_ADMIN_CMDS_H_

#define EFA_ADMIN_API_VERSION_MAJOR          0
#define EFA_ADMIN_API_VERSION_MINOR          1

/* EFA admin queue opcodes */
enum efa_admin_aq_opcode {
	EFA_ADMIN_CREATE_QP                         = 1,
	EFA_ADMIN_MODIFY_QP                         = 2,
	EFA_ADMIN_QUERY_QP                          = 3,
	EFA_ADMIN_DESTROY_QP                        = 4,
	EFA_ADMIN_CREATE_AH                         = 5,
	EFA_ADMIN_DESTROY_AH                        = 6,
	EFA_ADMIN_REG_MR                            = 7,
	EFA_ADMIN_DEREG_MR                          = 8,
	EFA_ADMIN_CREATE_CQ                         = 9,
	EFA_ADMIN_DESTROY_CQ                        = 10,
	EFA_ADMIN_GET_FEATURE                       = 11,
	EFA_ADMIN_SET_FEATURE                       = 12,
	EFA_ADMIN_GET_STATS                         = 13,
	EFA_ADMIN_ALLOC_PD                          = 14,
	EFA_ADMIN_DEALLOC_PD                        = 15,
	EFA_ADMIN_ALLOC_UAR                         = 16,
	EFA_ADMIN_DEALLOC_UAR                       = 17,
	EFA_ADMIN_CREATE_EQ                         = 18,
	EFA_ADMIN_DESTROY_EQ                        = 19,
	EFA_ADMIN_MAX_OPCODE                        = 19,
};

enum efa_admin_aq_feature_id {
	EFA_ADMIN_DEVICE_ATTR                       = 1,
	EFA_ADMIN_AENQ_CONFIG                       = 2,
	EFA_ADMIN_NETWORK_ATTR                      = 3,
	EFA_ADMIN_QUEUE_ATTR                        = 4,
	EFA_ADMIN_HW_HINTS                          = 5,
	EFA_ADMIN_HOST_INFO                         = 6,
	EFA_ADMIN_EVENT_QUEUE_ATTR                  = 7,
};

/* QP transport type */
enum efa_admin_qp_type {
	/* Unreliable Datagram */
	EFA_ADMIN_QP_TYPE_UD                        = 1,
	/* Scalable Reliable Datagram */
	EFA_ADMIN_QP_TYPE_SRD                       = 2,
};

/* QP state */
enum efa_admin_qp_state {
	EFA_ADMIN_QP_STATE_RESET                    = 0,
	EFA_ADMIN_QP_STATE_INIT                     = 1,
	EFA_ADMIN_QP_STATE_RTR                      = 2,
	EFA_ADMIN_QP_STATE_RTS                      = 3,
	EFA_ADMIN_QP_STATE_SQD                      = 4,
	EFA_ADMIN_QP_STATE_SQE                      = 5,
	EFA_ADMIN_QP_STATE_ERR                      = 6,
};

enum efa_admin_get_stats_type {
	EFA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
	EFA_ADMIN_GET_STATS_TYPE_MESSAGES           = 1,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_READ          = 2,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE         = 3,
};

enum efa_admin_get_stats_scope {
	EFA_ADMIN_GET_STATS_SCOPE_ALL               = 0,
	EFA_ADMIN_GET_STATS_SCOPE_QUEUE             = 1,
};

/*
 * QP allocation sizes, derived by the Queue Pair (QP) create command from
 * the requested QP capabilities.
 */
struct efa_admin_qp_alloc_size {
	/* Send descriptor ring size in bytes */
	u32 send_queue_ring_size;

	/* Max number of WQEs that can be outstanding on send queue. */
	u32 send_queue_depth;

	/*
	 * Recv descriptor ring size in bytes, sufficient for user-provided
	 * number of WQEs
	 */
	u32 recv_queue_ring_size;

	/* Max number of WQEs that can be outstanding on recv queue */
	u32 recv_queue_depth;
};

struct efa_admin_create_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain associated with this QP */
	u16 pd;

	/* QP type */
	u8 qp_type;

	/*
	 * 0 : sq_virt - If set, SQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 1 : rq_virt - If set, RQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 7:2 : reserved - MBZ
	 * See the example following this struct.
	 */
	u8 flags;

	/*
	 * Send queue (SQ) ring base physical address. This field is not
	 * used if this is a Low Latency Queue (LLQ).
	 */
	u64 sq_base_addr;

	/* Receive queue (RQ) ring base address. */
	u64 rq_base_addr;

	/* Index of CQ to be associated with Send Queue completions */
	u32 send_cq_idx;

	/* Index of CQ to be associated with Recv Queue completions */
	u32 recv_cq_idx;

	/*
	 * Memory registration key for the SQ ring, used only when not in
	 * LLQ mode and base address is virtual
	 */
	u32 sq_l_key;

	/*
	 * Memory registration key for the RQ ring, used only when base
	 * address is virtual
	 */
	u32 rq_l_key;

	/* Requested QP allocation sizes */
	struct efa_admin_qp_alloc_size qp_alloc_size;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;

	/* MBZ */
	u32 reserved2;
};
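
/*
 * Illustrative sketch, not part of the device interface: one way a caller
 * could fill the virtual-address flags of struct efa_admin_create_qp_cmd
 * according to the bit layout documented above. The driver's own code may
 * use dedicated field-access helpers instead of the explicit shifts shown
 * here.
 */
static inline void
efa_admin_create_qp_set_virt_example(struct efa_admin_create_qp_cmd *cmd,
				     bool sq_virt, bool rq_virt)
{
	/* bit 0: sq_virt, bit 1: rq_virt, bits 7:2 must be zero */
	cmd->flags = 0;
	if (sq_virt)
		cmd->flags |= 1U << 0;
	if (rq_virt)
		cmd->flags |= 1U << 1;
}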

struct efa_admin_create_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * Opaque handle to be used for subsequent admin operations on the
	 * QP
	 */
	u32 qp_handle;

	/*
	 * QP number in the given EFA virtual device. Least-significant bits (as
	 * needed according to max_qp) carry unique QP ID
	 */
	u16 qp_num;

	/* MBZ */
	u16 reserved;

	/* Index of sub-CQ for Send Queue completions */
	u16 send_sub_cq_idx;

	/* Index of sub-CQ for Receive Queue completions */
	u16 recv_sub_cq_idx;

	/* SQ doorbell address, as offset to PCIe DB BAR */
	u32 sq_db_offset;

	/* RQ doorbell address, as offset to PCIe DB BAR */
	u32 rq_db_offset;

	/*
	 * Low latency send queue ring base address, as an offset to PCIe
	 * MMIO LLQ_MEM BAR
	 */
	u32 llq_descriptors_offset;
};

struct efa_admin_modify_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * Mask indicating which fields should be updated
	 * 0 : qp_state
	 * 1 : cur_qp_state
	 * 2 : qkey
	 * 3 : sq_psn
	 * 4 : sq_drained_async_notify
	 * 5 : rnr_retry
	 * 31:6 : reserved
	 * See the example following this struct.
	 */
	u32 modify_mask;

	/* QP handle returned by create_qp command */
	u32 qp_handle;

	/* QP state */
	u32 qp_state;

	/* Override current QP state (before applying the transition) */
	u32 cur_qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Enable async notification when SQ is drained */
	u8 sq_drained_async_notify;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};
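
/*
 * Illustrative sketch only: building modify_mask from the bit layout
 * documented above. The EFA_ADMIN_MODIFY_QP_CMD_*_MASK macros at the end
 * of this file name the same bits.
 */
static inline void
efa_admin_modify_qp_state_example(struct efa_admin_modify_qp_cmd *cmd,
				  u32 qp_handle, u32 qp_state, u32 qkey)
{
	cmd->qp_handle = qp_handle;
	cmd->qp_state = qp_state;
	cmd->qkey = qkey;
	/* update only qp_state (bit 0) and qkey (bit 2) */
	cmd->modify_mask = (1U << 0) | (1U << 2);
}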

struct efa_admin_modify_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_query_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

struct efa_admin_query_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* QP state */
	u32 qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Indicates that draining is in progress */
	u8 sq_draining;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};

struct efa_admin_destroy_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

struct efa_admin_destroy_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Create Address Handle command parameters. Must not be issued more than
 * once for the same destination address.
 */
struct efa_admin_create_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Destination address in network byte order */
	u8 dest_addr[16];

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_create_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* PD number */
	u16 pd;
};

struct efa_admin_destroy_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Registration of a Memory Region, required for QPs working with virtual
 * addresses. In standard verbs semantics, region length is limited to 2GB,
 * but EFA supports larger memory regions to ease use with very large
 * datasets (e.g. mapping full GPU memory).
 */
struct efa_admin_reg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved16_w1;

	/* Physical Buffer List, each element is page-aligned. */
	union {
		/*
		 * Inline array of guest-physical page addresses of user
		 * memory pages (optimization for short region
		 * registrations)
		 */
		u64 inline_pbl_array[4];

		/* Points to the PBL (direct or indirect, chained if needed) */
		struct efa_admin_ctrl_buff_info pbl;
	} pbl;

	/* Memory region length, in bytes. */
	u64 mr_length;

	/*
	 * flags and page size
	 * 4:0 : phys_page_size_shift - page size is (1 <<
	 *    phys_page_size_shift). Page size is used for
	 *    building the Virtual to Physical address mapping
	 * 6:5 : reserved - MBZ
	 * 7 : mem_addr_phy_mode_en - Enable bit for physical
	 *    memory registration (no translation), can be used
	 *    only by privileged clients. If set, PBL must
	 *    contain a single entry.
	 * See the example following this struct.
	 */
	u8 flags;

	/*
	 * permissions
	 * 0 : local_write_enable - Local write permissions:
	 *    must be set for RQ buffers and buffers posted for
	 *    RDMA Read requests
	 * 1 : remote_write_enable - Remote write
	 *    permissions: must be set to enable RDMA write to
	 *    the region
	 * 2 : remote_read_enable - Remote read permissions:
	 *    must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
	 */
	u8 permissions;

	/* MBZ */
	u16 reserved16_w5;

	/* Number of pages in the PBL (redundant; could be derived) */
	u32 page_num;

	/*
	 * IO Virtual Address associated with this MR. If
	 * mem_addr_phy_mode_en is set, contains the physical address of
	 * the region.
	 */
	u64 iova;
};
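
/*
 * Illustrative sketch only: composing the flags and permissions bytes of
 * struct efa_admin_reg_mr_cmd per the bit layouts documented above. The
 * page_size_shift argument is log2 of the physical page size (e.g. 12 for
 * 4KiB pages).
 */
static inline void
efa_admin_reg_mr_flags_example(struct efa_admin_reg_mr_cmd *cmd,
			       u8 page_size_shift, bool remote_read)
{
	/* bits 4:0 hold log2 of the physical page size */
	cmd->flags = page_size_shift & 0x1f;

	/* bit 0: local write (required for RQ buffers), bit 2: remote read */
	cmd->permissions = 1U << 0;
	if (remote_read)
		cmd->permissions |= 1U << 2;
}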

struct efa_admin_reg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;
};

struct efa_admin_dereg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* L_Key, memory region's l_key */
	u32 l_key;
};

struct efa_admin_dereg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_create_cq_cmd {
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * 4:0 : reserved5 - MBZ
	 * 5 : interrupt_mode_enabled - if set, cq operates
	 *    in interrupt mode (i.e. CQ events and EQ elements
	 *    are generated), otherwise it operates in polling mode
	 * 6 : virt - If set, ring base address is virtual
	 *    (IOVA returned by MR registration)
	 * 7 : reserved6 - MBZ
	 * See the example following this struct.
	 */
	u8 cq_caps_1;

	/*
	 * 4:0 : cq_entry_size_words - size of CQ entry in
	 *    32-bit words, valid values: 4, 8.
	 * 5 : set_src_addr - If set, source address will be
	 *    filled on RX completions from unknown senders.
	 *    Requires an 8-word CQ entry size.
	 * 7:6 : reserved7 - MBZ
	 */
	u8 cq_caps_2;

	/* Completion queue depth in number of entries; must be a power of 2 */
	u16 cq_depth;

	/* EQ number assigned to this cq */
	u16 eqn;

	/* MBZ */
	u16 reserved;

	/*
	 * CQ ring base address, virtual or physical depending on 'virt'
	 * flag
	 */
	struct efa_common_mem_addr cq_ba;

	/*
	 * Memory registration key for the ring, used only when base
	 * address is virtual
	 */
	u32 l_key;

	/*
	 * Number of sub-CQs; must be equal to the sub_cqs_per_cq queue
	 * attribute.
	 */
	u16 num_sub_cqs;

	/* UAR number */
	u16 uar;
};
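
/*
 * Illustrative sketch only: filling the capability bytes of
 * struct efa_admin_create_cq_cmd per the bit layouts documented above.
 * The basic CQ entry is 4 words; set_src_addr requires an 8-word entry.
 */
static inline void
efa_admin_create_cq_caps_example(struct efa_admin_create_cq_cmd *cmd,
				 bool interrupt_mode, bool virt,
				 bool set_src_addr)
{
	cmd->cq_caps_1 = 0;
	if (interrupt_mode)
		cmd->cq_caps_1 |= 1U << 5;	/* interrupt_mode_enabled */
	if (virt)
		cmd->cq_caps_1 |= 1U << 6;	/* virt */

	/* bits 4:0: CQ entry size in 32-bit words, valid values 4 or 8 */
	cmd->cq_caps_2 = set_src_addr ? 8 : 4;
	if (set_src_addr)
		cmd->cq_caps_2 |= 1U << 5;	/* set_src_addr */
}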

struct efa_admin_create_cq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	u16 cq_idx;

	/* actual cq depth in number of entries */
	u16 cq_actual_depth;

	/* CQ doorbell address, as offset to PCIe DB BAR */
	u32 db_offset;

	/*
	 * 0 : db_valid - If set, doorbell offset is valid.
	 *    Always set when interrupts are requested.
	 */
	u32 flags;
};

struct efa_admin_destroy_cq_cmd {
	struct efa_admin_aq_common_desc aq_common_desc;

	u16 cq_idx;

	/* MBZ */
	u16 reserved1;
};

struct efa_admin_destroy_cq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * EFA AQ Get Statistics command. Extended statistics are placed in the
 * control buffer pointed to by the AQ entry.
 */
struct efa_admin_aq_get_stats_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	union {
		/* command specific inline data */
		u32 inline_data_w1[3];

		struct efa_admin_ctrl_buff_info control_buffer;
	} u;

	/* stats type as defined in enum efa_admin_get_stats_type */
	u8 type;

	/* stats scope as defined in enum efa_admin_get_stats_scope */
	u8 scope;

	u16 scope_modifier;
};

struct efa_admin_basic_stats {
	u64 tx_bytes;

	u64 tx_pkts;

	u64 rx_bytes;

	u64 rx_pkts;

	u64 rx_drops;
};

struct efa_admin_messages_stats {
	u64 send_bytes;

	u64 send_wrs;

	u64 recv_bytes;

	u64 recv_wrs;
};

struct efa_admin_rdma_read_stats {
	u64 read_wrs;

	u64 read_bytes;

	u64 read_wr_err;

	u64 read_resp_bytes;
};

struct efa_admin_rdma_write_stats {
	u64 write_wrs;

	u64 write_bytes;

	u64 write_wr_err;

	u64 write_recv_bytes;
};

struct efa_admin_acq_get_stats_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		struct efa_admin_basic_stats basic_stats;

		struct efa_admin_messages_stats messages_stats;

		struct efa_admin_rdma_read_stats rdma_read_stats;

		struct efa_admin_rdma_write_stats rdma_write_stats;
	} u;
};

struct efa_admin_get_set_feature_common_desc {
	/* MBZ */
	u8 reserved0;

	/* as appears in efa_admin_aq_feature_id */
	u8 feature_id;

	/* MBZ */
	u16 reserved16;
};

struct efa_admin_feature_device_attr_desc {
	/* Bitmap of efa_admin_aq_feature_id */
	u64 supported_features;

	/* Bitmap of supported page sizes in MR registrations */
	u64 page_size_cap;

	u32 fw_version;

	u32 admin_api_version;

	u32 device_version;

	/* Bar used for SQ and RQ doorbells */
	u16 db_bar;

	/* Indicates how many bits are used for physical address access */
	u8 phys_addr_width;

	/* Indicates how many bits are used for virtual address access */
	u8 virt_addr_width;

	/*
	 * 0 : rdma_read - If set, RDMA Read is supported on
	 *    TX queues
	 * 1 : rnr_retry - If set, RNR retry is supported on
	 *    modify QP command
	 * 2 : data_polling_128 - If set, 128 bytes data
	 *    polling is supported
	 * 3 : rdma_write - If set, RDMA Write is supported
	 *    on TX queues
	 * 31:4 : reserved - MBZ
	 * See the example following this struct.
	 */
	u32 device_caps;

	/* Max RDMA transfer size in bytes */
	u32 max_rdma_size;
};
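
/*
 * Illustrative sketch only: testing a device_caps bit reported in
 * struct efa_admin_feature_device_attr_desc. The
 * EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_*_MASK macros at the end of this
 * file cover the same bits.
 */
static inline bool
efa_admin_supports_rdma_write_example(
		const struct efa_admin_feature_device_attr_desc *attr)
{
	return !!(attr->device_caps & (1U << 3));	/* rdma_write */
}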

struct efa_admin_feature_queue_attr_desc {
	/* The maximum number of queue pairs supported */
	u32 max_qp;

	/* Maximum number of WQEs per Send Queue */
	u32 max_sq_depth;

	/* Maximum size of data that can be sent inline in a Send WQE */
	u32 inline_buf_size;

	/* Maximum number of buffer descriptors per Recv Queue */
	u32 max_rq_depth;

	/* The maximum number of completion queues supported per VF */
	u32 max_cq;

	/* Maximum number of CQEs per Completion Queue */
	u32 max_cq_depth;

	/* Number of sub-CQs to be created for each CQ */
	u16 sub_cqs_per_cq;

	/* Minimum number of WQEs per SQ */
	u16 min_sq_depth;

	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
	u16 max_wr_send_sges;

	/* Maximum number of SGEs allowed for a single recv WQE */
	u16 max_wr_recv_sges;

	/* The maximum number of memory regions supported */
	u32 max_mr;

	/* The maximum number of pages that can be registered */
	u32 max_mr_pages;

	/* The maximum number of protection domains supported */
	u32 max_pd;

	/* The maximum number of address handles supported */
	u32 max_ah;

	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;

	/* Maximum number of SGEs for a single RDMA read/write WQE */
	u16 max_wr_rdma_sges;

	/*
	 * Maximum number of bytes that can be written to the SQ between two
	 * consecutive doorbells (in units of 64B). The driver must ensure
	 * that only complete WQEs are written to the queue before issuing a
	 * doorbell. Examples: max_tx_batch=16 and WQE size = 64B means up to
	 * 16 WQEs can be written to the SQ between two consecutive doorbells;
	 * max_tx_batch=11 and WQE size = 128B means up to 5 WQEs can be
	 * written to the SQ between two consecutive doorbells. Zero means
	 * unlimited. See the example following this struct.
	 */
	u16 max_tx_batch;
};
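
/*
 * Illustrative sketch only: the arithmetic behind the max_tx_batch
 * examples above. max_tx_batch is expressed in 64B units, so the number
 * of complete WQEs that may be written between two doorbells is
 * (max_tx_batch * 64) / wqe_size: 16 WQEs for max_tx_batch=16 with 64B
 * WQEs, 5 WQEs for max_tx_batch=11 with 128B WQEs.
 */
static inline u32
efa_admin_max_wqes_per_batch_example(u16 max_tx_batch, u32 wqe_size)
{
	if (!max_tx_batch)
		return 0;	/* zero means the batch size is unlimited */

	return (max_tx_batch * 64U) / wqe_size;
}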

struct efa_admin_event_queue_attr_desc {
	/* The maximum number of event queues supported */
	u32 max_eq;

	/* Maximum number of EQEs per Event Queue */
	u32 max_eq_depth;

	/* Supported events bitmask */
	u32 event_bitmask;
};

struct efa_admin_feature_aenq_desc {
	/* bitmask for AENQ groups the device can report */
	u32 supported_groups;

	/* bitmask for AENQ groups to report */
	u32 enabled_groups;
};

struct efa_admin_feature_network_attr_desc {
	/* Raw address data in network byte order */
	u8 addr[16];

	/* max packet payload size in bytes */
	u32 mtu;
};

/*
 * When a hint value is 0, the hint capability is not supported or the
 * driver should use its own predefined value.
 */
struct efa_admin_hw_hints {
	/* value in ms */
	u16 mmio_read_timeout;

	/* value in ms */
	u16 driver_watchdog_timeout;

	/* value in ms */
	u16 admin_completion_timeout;

	/* poll interval in ms */
	u16 poll_interval;
};

struct efa_admin_get_feature_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	struct efa_admin_ctrl_buff_info control_buffer;

	struct efa_admin_get_set_feature_common_desc feature_common;

	u32 raw[11];
};

struct efa_admin_get_feature_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];

		struct efa_admin_feature_device_attr_desc device_attr;

		struct efa_admin_feature_aenq_desc aenq;

		struct efa_admin_feature_network_attr_desc network_attr;

		struct efa_admin_feature_queue_attr_desc queue_attr;

		struct efa_admin_event_queue_attr_desc event_queue_attr;

		struct efa_admin_hw_hints hw_hints;
	} u;
};

struct efa_admin_set_feature_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	struct efa_admin_ctrl_buff_info control_buffer;

	struct efa_admin_get_set_feature_common_desc feature_common;

	union {
		u32 raw[11];

		/* AENQ configuration */
		struct efa_admin_feature_aenq_desc aenq;
	} u;
};

struct efa_admin_set_feature_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];
	} u;
};

struct efa_admin_alloc_pd_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

struct efa_admin_alloc_pd_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_pd_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_pd_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_alloc_uar_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

struct efa_admin_alloc_uar_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_uar_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_uar_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_create_eq_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* Size of the EQ in entries, must be power of 2 */
	u16 depth;

	/* MSI-X table entry index */
	u8 msix_vec;

	/*
	 * 4:0 : entry_size_words - size of EQ entry in
	 *    32-bit words
	 * 7:5 : reserved - MBZ
	 * See the example following this struct.
	 */
	u8 caps;

	/* EQ ring base address */
	struct efa_common_mem_addr ba;

	/*
	 * Enabled events on this EQ
	 * 0 : completion_events - Enable completion events
	 * 31:1 : reserved - MBZ
	 */
	u32 event_bitmask;

	/* MBZ */
	u32 reserved;
};
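
/*
 * Illustrative sketch only: filling the caps byte and event bitmask of
 * struct efa_admin_create_eq_cmd per the bit layouts documented above.
 */
static inline void
efa_admin_create_eq_caps_example(struct efa_admin_create_eq_cmd *cmd,
				 u16 depth, u8 msix_vec, u8 entry_size_words)
{
	cmd->depth = depth;			/* must be a power of 2 */
	cmd->msix_vec = msix_vec;
	/* bits 4:0: EQ entry size in 32-bit words */
	cmd->caps = entry_size_words & 0x1f;
	/* bit 0: enable completion events */
	cmd->event_bitmask = 1U << 0;
}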

struct efa_admin_create_eq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_eq_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_eq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

/* asynchronous event notification groups */
enum efa_admin_aenq_group {
	EFA_ADMIN_FATAL_ERROR                       = 1,
	EFA_ADMIN_WARNING                           = 2,
	EFA_ADMIN_NOTIFICATION                      = 3,
	EFA_ADMIN_KEEP_ALIVE                        = 4,
	EFA_ADMIN_AENQ_GROUPS_NUM                   = 5,
};

struct efa_admin_mmio_req_read_less_resp {
	u16 req_id;

	u16 reg_off;

	/* value is valid when poll is cleared */
	u32 reg_val;
};

enum efa_admin_os_type {
	EFA_ADMIN_OS_LINUX                          = 0,
};

struct efa_admin_host_info {
	/* OS distribution, string format */
	u8 os_dist_str[128];

	/* Defined in enum efa_admin_os_type */
	u32 os_type;

	/* Kernel version, string format */
	u8 kernel_ver_str[32];

	/* Kernel version, numeric format */
	u32 kernel_ver;

	/*
	 * 7:0 : driver_module_type
	 * 15:8 : driver_sub_minor
	 * 23:16 : driver_minor
	 * 31:24 : driver_major
	 * See the example following this struct.
	 */
	u32 driver_ver;

	/*
	 * Device's Bus, Device and Function
	 * 2:0 : function
	 * 7:3 : device
	 * 15:8 : bus
	 */
	u16 bdf;

	/*
	 * Spec version
	 * 7:0 : spec_minor
	 * 15:8 : spec_major
	 */
	u16 spec_ver;

	/*
	 * 0 : intree - Intree driver
	 * 1 : gdr - GPUDirect RDMA supported
	 * 31:2 : reserved2
	 */
	u32 flags;
};
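
/*
 * Illustrative sketch only: packing the driver_ver and bdf fields of
 * struct efa_admin_host_info per the bit layouts documented above
 * (driver_module_type is left as zero). The EFA_ADMIN_HOST_INFO_*_MASK
 * macros below describe the same fields.
 */
static inline void
efa_admin_host_info_pack_example(struct efa_admin_host_info *hinf,
				 u8 major, u8 minor, u8 sub_minor,
				 u8 bus, u8 dev, u8 func)
{
	hinf->driver_ver = ((u32)major << 24) | ((u32)minor << 16) |
			   ((u32)sub_minor << 8);
	hinf->bdf = ((u16)bus << 8) | ((u16)(dev & 0x1f) << 3) | (func & 0x7);
}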

/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)

/* modify_qp_cmd */
#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK               BIT(0)
#define EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK           BIT(1)
#define EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK                   BIT(2)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN_MASK                 BIT(3)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY_MASK BIT(4)
#define EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY_MASK              BIT(5)

/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK       BIT(1)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)

/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK           BIT(5)

/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK              BIT(0)

/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK   BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK   BIT(1)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK  BIT(3)

/* create_eq_cmd */
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK       GENMASK(4, 0)
#define EFA_ADMIN_CREATE_EQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK      BIT(0)

/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK         GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK           GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK               GENMASK(23, 16)
#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK               GENMASK(31, 24)
#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK                   GENMASK(2, 0)
#define EFA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)
#define EFA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK                 GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK                 GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_INTREE_MASK                     BIT(0)
#define EFA_ADMIN_HOST_INFO_GDR_MASK                        BIT(1)
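
/*
 * Illustrative sketch only: the mask macros above are applied directly to
 * the corresponding command/response fields, e.g. checking whether the
 * doorbell offset returned by create CQ is valid. The driver may use its
 * own field-access helpers for this.
 */
static inline bool
efa_admin_cq_db_valid_example(const struct efa_admin_create_cq_resp *resp)
{
	return !!(resp->flags & EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK);
}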

#endif /* _EFA_ADMIN_CMDS_H_ */