/*
 * vhost-vdpa.h
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef HW_VIRTIO_VHOST_VDPA_H
#define HW_VIRTIO_VHOST_VDPA_H

#include <gmodule.h>

#include "hw/virtio/vhost-iova-tree.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"

/*
 * ASID dedicated to mapping the guest's addresses. If SVQ is disabled, it
 * maps GPA to QEMU's IOVA. If SVQ is enabled, the SVQ vring is also mapped
 * here.
 */
#define VHOST_VDPA_GUEST_PA_ASID 0

typedef struct VhostVDPAHostNotifier {
    MemoryRegion mr;
    void *addr;
} VhostVDPAHostNotifier;

typedef struct vhost_vdpa {
    int device_fd;
    int index;
    uint32_t msg_type;
    bool iotlb_batch_begin_sent;
    uint32_t address_space_id;
    MemoryListener listener;
    struct vhost_vdpa_iova_range iova_range;
    uint64_t acked_features;
    bool shadow_vqs_enabled;
    /* vDPA must send shadow addresses as IOTLB keys for data queues, not GPA */
    bool shadow_data;
    /* Device suspended successfully */
    bool suspended;
    /* IOVA mapping used by the Shadow Virtqueue */
    VhostIOVATree *iova_tree;
    GPtrArray *shadow_vqs;
    const VhostShadowVirtqueueOps *shadow_vq_ops;
    void *shadow_vq_ops_opaque;
    struct vhost_dev *dev;
    Error *migration_blocker;
    VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
} VhostVDPA;

/* Fetch the usable IOVA range from a vhost-vdpa device file descriptor */
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);

/*
 * Map [iova, iova + size) in address space @asid to the host address @vaddr;
 * @readonly requests a read-only mapping.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly);
/* Remove the mapping covering [iova, iova + size) in address space @asid */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size);

#endif
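
/*
 * Usage sketch (illustrative only, kept inside a comment so it is not
 * compiled as part of this header): it shows how a caller might combine the
 * declarations above to install a single read-write mapping in the address
 * space reserved for guest physical addresses. The helper name
 * example_map_buffer, its bounds check, and the errno-style return values
 * are assumptions made for illustration; they are not an API defined here.
 *
 *   static int example_map_buffer(struct vhost_vdpa *v, hwaddr iova,
 *                                 void *vaddr, hwaddr size)
 *   {
 *       struct vhost_vdpa_iova_range range;
 *       int r;
 *
 *       r = vhost_vdpa_get_iova_range(v->device_fd, &range);
 *       if (r < 0) {
 *           return r;
 *       }
 *
 *       if (iova < range.first || iova + size - 1 > range.last) {
 *           return -EINVAL;
 *       }
 *
 *       // readonly == false requests a read-write mapping
 *       return vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, size,
 *                                 vaddr, false);
 *   }
 *
 * A real caller would typically consult the cached v->iova_range field
 * instead of re-querying the device; the explicit vhost_vdpa_get_iova_range()
 * call is shown here only to demonstrate both declarations together.
 */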