/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES 0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE		= 0,
	GDMA_WR_OOB_IN_SGL	= BIT(0),
	GDMA_WR_PAD_BY_SGE0	= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC	 = 1,
	GDMA_DEVICE_MANA = 2,
};

typedef u64 gdma_obj_handle_t;

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
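/* Illustrative usage sketch (not part of this header's API): a request
 * message embeds a struct gdma_req_hdr as its first field, initializes it
 * with mana_gd_init_req_hdr(), and is posted with mana_gd_send_request()
 * declared below. For example, to request a test EQE, where 'gc' and 'eq'
 * stand for the caller's gdma_context and EQ, and error handling is omitted:
 *
 *	struct gdma_generate_test_event_req req = {};
 *	struct gdma_general_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
 *			     sizeof(req), sizeof(resp));
 *	req.queue_index = eq->id;
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 */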
/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	gdma_obj_handle_t dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE		64
#define GDMA_EQE_SIZE		16
#define GDMA_MAX_SQE_SIZE	512
#define GDMA_MAX_RQE_SIZE	256

#define GDMA_COMP_DATA_SIZE	0x3C

#define GDMA_EVENT_DATA_SIZE	0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE	32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
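/* Illustrative sketch of the 'head' bookkeeping described above (a simplified
 * example, not the exact driver logic): after posting a WQE of 'wqe_size'
 * bytes to an SQ/RQ, the producer index advances in 32-byte Basic Units
 * rather than in bytes:
 *
 *	wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;	// wqe_size is a
 *							// multiple of 32 bytes
 *	wq->head += wqe_size_in_bu;
 *	mana_gd_wq_ring_doorbell(gc, wq);	// notify the HW of the new head
 *
 * For an EQ/CQ, 'head' instead advances by 1 per processed EQE/CQE.
 */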
struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

struct gdma_irq_context {
	void (*handler)(void *arg);
	void *arg;
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_resource msix_resource;
	struct gdma_irq_context *irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	phys_addr_t phys_db_page_base;
	u32 db_page_size;
	int numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
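/* For reference, with the sizes defined above (struct gdma_sge is 16 bytes),
 * the two limits work out to:
 *
 *	MAX_TX_WQE_SGL_ENTRIES = (512 - 16 - 8) / 16 = 30
 *	MAX_RX_WQE_SGL_ENTRIES = (256 - 16) / 16     = 15
 *
 * (integer division in both cases).
 */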
struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */
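/* Simplified sketch of the owner-bits mechanism referenced earlier (an
 * illustration only, not necessarily the exact driver logic). Assuming 'head'
 * was seeded with INITIALIZED_OWNER_BIT(log2_num_cqe), the CQE at 'head' is
 * new only when its owner bits have advanced to the current lap:
 *
 *	num_cqe  = cq->queue_size / GDMA_CQE_SIZE;
 *	cqe      = &((struct gdma_cqe *)cq->queue_mem_ptr)[cq->head % num_cqe];
 *	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
 *	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *
 *	if (cqe->cqe_info.owner_bits == old_bits)
 *		return 0;	// no new completion yet
 *	if (cqe->cqe_info.owner_bits != new_bits)
 *		return -1;	// the CQ has overflowed
 *	cq->head++;		// consume this CQE
 */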
#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	gdma_obj_handle_t gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ	= BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE	= BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ	= BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE	= BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC	= BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
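/* Illustrative sketch (not the exact driver logic): when a region spans more
 * pages than fit in one HWC message, the creator sends the first batch in
 * this request and the remainder via GDMA_DMA_REGION_ADD_PAGES. With the
 * placeholder variables 'num_pages' and 'pages_per_msg':
 *
 *	req->length             = num_pages * PAGE_SIZE;
 *	req->offset_in_page     = 0;
 *	req->gdma_page_type     = GDMA_PAGE_TYPE_4K;
 *	req->page_count         = num_pages;
 *	req->page_addr_list_len = min(num_pages, pages_per_msg);
 *	// Copy that many DMA addresses into req->page_addr_list[], send the
 *	// request, then send gdma_dma_region_add_pages_req messages (defined
 *	// below) for the rest, referencing the returned dma_region_handle.
 */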
struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	gdma_obj_handle_t dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t pd_handle;
}; /* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	gdma_obj_handle_t pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			gdma_obj_handle_t dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			gdma_obj_handle_t dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;

	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc,
			       gdma_obj_handle_t dma_region_handle);

#endif /* _GDMA_H */