/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES 0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_ALLOCATE_RESOURCE_RANGE = 22,
	GDMA_DESTROY_RESOURCE_RANGE = 24,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
	GDMA_CREATE_PD = 29,
	GDMA_DESTROY_PD = 30,
	GDMA_CREATE_MR = 31,
	GDMA_DESTROY_MR = 32,
	GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE 27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
	GDMA_EQE_HWC_SOC_RECONFIG = 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} cq;

	struct {
		u64 id : 24;
		u64 wqe_cnt : 8;
		u64 tail_ptr : 32;
	} rq;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 32;
	} sq;

	struct {
		u64 id : 16;
		u64 reserved : 16;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} eq;
}; /* HW DATA */
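
/* Illustrative only (not the driver's implementation): a CQ doorbell value
 * could be composed from the fields above and written to the device.  The
 * names 'cq_id', 'cq_head' and 'cq_db_addr' are assumptions for this sketch;
 * the actual doorbell address computation is device specific.
 *
 *	union gdma_doorbell_entry e = {};
 *
 *	e.cq.id = cq_id;
 *	e.cq.tail_ptr = cq_head;	// index processed so far (assumed)
 *	e.cq.arm = 1;
 *	writeq(e.as_uint64, cq_db_addr);
 */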

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
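
/* Illustrative only (not part of this header's API): a minimal sketch of
 * issuing a GDMA request, assuming 'gc' is the adapter's struct gdma_context
 * and 'eq_id' is the id of a previously created EQ.  mana_gd_init_req_hdr()
 * sizes both messages; mana_gd_send_request() (declared later in this file)
 * sends the request over the HWC.  GDMA_GENERATE_TEST_EQE is used here purely
 * as an example message.
 *
 *	struct gdma_generate_test_event_req req = {};
 *	struct gdma_general_resp resp = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
 *			     sizeof(req), sizeof(resp));
 *	req.queue_index = eq_id;
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		return err ? err : -EPROTO;
 */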

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID UINT_MAX
#define INVALID_DOORBELL UINT_MAX
#define INVALID_MEM_KEY UINT_MAX
#define INVALID_QUEUE_ID UINT_MAX
#define INVALID_PCI_MSIX_INDEX UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
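
/* Worked example (illustrative, not from the hardware spec): a 64-byte SQ WQE
 * occupies 64 / GDMA_WQE_BU_SIZE = 2 BUs, so posting it advances 'head' by 2;
 * once the corresponding completion has been processed, 'tail' is advanced by
 * the same 2 BUs, making that space available again for new WQEs.
 */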

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	void *arg;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_resource msix_resource;
	struct gdma_irq_context *irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	phys_addr_t phys_db_page_base;
	u32 db_page_size;
	int numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved :24;
	u32 last_vbytes :8;

	union {
		u32 flags;

		struct {
			u32 num_sge :8;
			u32 inline_oob_size_div4 :3;
			u32 client_oob_in_sgl :1;
			u32 reserved1 :4;
			u32 client_data_unit :14;
			u32 reserved2 :2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
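
/* Worked numbers (illustrative): with GDMA_MAX_SQE_SIZE = 512, a 16-byte
 * struct gdma_sge and INLINE_OOB_SMALL_SIZE = 8, MAX_TX_WQE_SGL_ENTRIES
 * evaluates to 30; MAX_RX_WQE_SGL_ENTRIES is (256 - 16) / 16 = 15.
 * Similarly, inline_oob_size_div4 in struct gdma_wqe carries the inline OOB
 * length in 4-byte units, i.e. 2 for an 8-byte OOB and 6 for a 24-byte OOB.
 */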

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num : 24;
			u32 is_sq : 1;
			u32 reserved : 4;
			u32 owner_bits : 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type : 8;
		u32 reserved1 : 8;
		u32 client_id : 2;
		u32 reserved2 : 11;
		u32 owner_bits : 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
#define GDMA_PF_REG_SHM_OFF 0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
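
/* Illustrative example (not from the spec): registering a page-aligned 64 KiB
 * buffer with GDMA_PAGE_TYPE_4K gives length = 65536, offset_in_page = 0 and
 * page_count = 16.  If fewer than 16 page addresses fit in the initial HWC
 * message, page_addr_list_len carries the number actually included and the
 * remainder is supplied with GDMA_DMA_REGION_ADD_PAGES requests.
 */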

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);
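
/* Illustrative sketch (assumptions: 'queue' is an SQ created earlier, 'dma'
 * is a device-visible DMA address of 'len' bytes, and gd->gpa_mkey is the
 * memory key to use): posting a single-SGE work request and ringing the work
 * queue doorbell in one call.
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_sge sge = {
 *		.address = dma,
 *		.mem_key = gd->gpa_mkey,
 *		.size = len,
 *	};
 *	struct gdma_wqe_request wqe_req = {
 *		.sgl = &sge,
 *		.num_sge = 1,
 *	};
 *	int err;
 *
 *	err = mana_gd_post_and_ring(queue, &wqe_req, &wqe_info);
 */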

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */