/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64	as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */
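
/* Illustrative sketch (not part of the upstream header): one way a caller
 * could compose the 64-bit doorbell word that re-arms a CQ before writing
 * it to the device's doorbell page. The variable names (cq, db_page_addr)
 * and the use of writeq() are assumptions made for the example.
 *
 *	union gdma_doorbell_entry e = {};
 *
 *	e.cq.id = cq->id;
 *	e.cq.tail_ptr = cq->head;
 *	e.cq.arm = 1;
 *	writeq(e.as_uint64, db_page_addr);
 */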

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};
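
/* Illustrative sketch (not part of the upstream header): filling a
 * gdma_wqe_request with a single SGE before posting it with
 * mana_gd_post_work_request(), declared later in this header. The
 * values dma_addr, len, oob and wq are placeholders.
 *
 *	struct gdma_sge sge = {
 *		.address = dma_addr,
 *		.mem_key = gd->gpa_mkey,
 *		.size = len,
 *	};
 *	struct gdma_wqe_request wqe_req = {
 *		.sgl = &sge,
 *		.num_sge = 1,
 *		.inline_oob_size = INLINE_OOB_SMALL_SIZE,
 *		.inline_oob_data = &oob,
 *		.flags = GDMA_WR_NONE,
 *	};
 *	struct gdma_posted_wqe_info wqe_info;
 *
 *	err = mana_gd_post_work_request(wq, &wqe_req, &wqe_info);
 */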

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
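
/* Illustrative sketch (not part of the upstream header) of how the 'head'
 * and 'tail' described above move for a SQ/RQ: after a WQE occupying
 * wqe_size_in_bu Basic Units is posted, the producer index advances in BUs
 * rather than bytes, and the consumer index advances by the same amount
 * once the corresponding completion has been processed.
 *
 *	wq->head += wqe_info.wqe_size_in_bu;
 *	...
 *	wq->tail += wqe_info.wqe_size_in_bu;	// on completion
 */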

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};
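
/* Illustrative sketch (not part of the upstream header): filling a
 * gdma_queue_spec to create a completion queue whose completions are
 * delivered on an existing EQ. The callback, context and size shown here
 * are placeholders.
 *
 *	struct gdma_queue_spec spec = {
 *		.type = GDMA_CQ,
 *		.monitor_avl_buf = false,
 *		.queue_size = cq_size,
 *		.cq.callback = my_cq_handler,
 *		.cq.context = my_ctx,
 *		.cq.parent_eq = eq,
 *	};
 *	struct gdma_queue *cq;
 *
 *	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq);
 */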

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	void *arg;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device		*dev;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct gdma_resource	msix_resource;
	struct gdma_irq_context	*irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;
	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u32			db_page_size;
	int			numa_node;
	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;
};

#define MAX_NUM_GDMA_DEVICES	4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
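
/* Illustrative sketch (not part of the upstream header): a typical
 * poll-then-rearm sequence on a CQ. CQE_POLLING_BUFFER and
 * handle_completion() are placeholders chosen for the example.
 *
 *	struct gdma_comp comps[CQE_POLLING_BUFFER];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comps, CQE_POLLING_BUFFER);
 *	for (i = 0; i < n; i++)
 *		handle_completion(&comps[i]);
 *	mana_gd_ring_cq(cq, SET_ARM_BIT);
 */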

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
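
/* Worked out with the 16-byte struct gdma_sge defined above (a sketch of
 * the arithmetic, not an authoritative statement of hardware limits):
 * MAX_TX_WQE_SGL_ENTRIES evaluates to (512 - 16 - 8) / 16 = 30 and
 * MAX_RX_WQE_SGL_ENTRIES to (256 - 16) / 16 = 15.
 */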

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
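
/* Illustrative sketch (not part of the upstream header): roughly how the
 * owner-bits mechanism can tell whether the CQE at the current head has
 * been written by the HW. The expected value is derived from how many
 * times the ring has wrapped; the logic is simplified for this example.
 *
 *	num_cqe = cq->queue_size / GDMA_CQE_SIZE;
 *	cqe = &((struct gdma_cqe *)cq->queue_mem_ptr)[cq->head % num_cqe];
 *	expected = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *	if (cqe->cqe_info.owner_bits != expected)
 *		return 0;	// not yet written by HW (or overflow)
 */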

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* GDMA driver capability flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */
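
/* Illustrative sketch (not part of the upstream header): how the mandatory
 * fields of gdma_verify_ver_req are typically populated from the constants
 * above before the message is sent; the advisory OS fields are omitted and
 * resp is assumed to be a struct gdma_verify_ver_resp (defined below).
 *
 *	struct gdma_verify_ver_req req = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
 *			     sizeof(req), sizeof(resp));
 *	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
 *	req.protocol_ver_max = GDMA_PROTOCOL_LAST;
 *	req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
 *	req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
 *	req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
 *	req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
 */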

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
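
/* Illustrative sketch (not part of the upstream header): splitting a large
 * region across the initial request and follow-up GDMA_DMA_REGION_ADD_PAGES
 * messages, per the comment inside the structure above. max_pgs_in_req is a
 * hypothetical per-message limit imposed by the HWC message size.
 *
 *	num_pages = gmi->length / PAGE_SIZE;
 *	req->length = gmi->length;
 *	req->offset_in_page = 0;
 *	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
 *	req->page_count = num_pages;
 *	req->page_addr_list_len = min(num_pages, max_pgs_in_req);
 *	// any remaining addresses follow in gdma_dma_region_add_pages_req
 */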

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};
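
/* Illustrative sketch (not part of the upstream header): describing a GVA
 * memory region over an existing DMA region and protection domain. The
 * handles and the virtual address are placeholders.
 *
 *	struct gdma_create_mr_params mr_params = {
 *		.pd_handle = pd_handle,
 *		.mr_type = GDMA_MR_TYPE_GVA,
 *		.gva.dma_region_handle = dma_region_handle,
 *		.gva.virtual_address = addr,
 *		.gva.access_flags = GDMA_ACCESS_FLAG_LOCAL_READ |
 *				    GDMA_ACCESS_FLAG_LOCAL_WRITE,
 *	};
 */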

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);
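
/* Illustrative sketch (not part of the upstream header): the common
 * request/response pattern used with the declarations above, shown here
 * for the parameter-less GDMA_QUERY_MAX_RESOURCES message. Error handling
 * is abbreviated, and -EPROTO for a bad response status is one possible
 * choice.
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		return err ? err : -EPROTO;
 */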

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */