/openbmc/linux/drivers/iommu/virtio-iommu.c
    64:  struct viommu_dev *viommu;  [member]
    65:  struct mutex mutex; /* protects viommu pointer */
    78:  struct viommu_dev *viommu;  [member]
   136:  static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,  [argument]
   143:  return len - viommu->probe_size - tail_size;  [in viommu_get_write_desc_offset()]
   154:  static int __viommu_sync_req(struct viommu_dev *viommu)  [argument]
   159:  struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];  [in __viommu_sync_req()]
   161:  assert_spin_locked(&viommu->request_lock);  [in __viommu_sync_req()]
   165:  while (!list_empty(&viommu->requests)) {  [in __viommu_sync_req()]
   187:  static int viommu_sync_req(struct viommu_dev *viommu)  [argument]
   [all …]
/openbmc/linux/drivers/acpi/viot.c
    48:  struct viot_iommu *viommu;  [member]
    77:  static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,  [argument]
   103:  viommu->fwnode = dev_fwnode(&pdev->dev);  [in viot_get_pci_iommu_fwnode()]
   108:  static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,  [argument]
   123:  viommu->fwnode = &adev->fwnode;  [in viot_get_mmio_iommu_fwnode()]
   130:  struct viot_iommu *viommu;  [local, in viot_get_iommu()]
   138:  list_for_each_entry(viommu, &viot_iommus, list)  [in viot_get_iommu()]
   139:  if (viommu->offset == offset)  [in viot_get_iommu()]
   140:  return viommu;  [in viot_get_iommu()]
   145:  viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);  [in viot_get_iommu()]
   [all …]
/openbmc/qemu/docs/bypass-iommu.txt
     6:  Traditionally, there is a global switch to enable/disable vIOMMU. All
     7:  devices in the system can only support go through vIOMMU or not, which
     9:  coexist of devices go through vIOMMU and devices not. This is useful to
    10:  passthrough devices with no-iommu mode and devices go through vIOMMU in
    17:  bypass vIOMMU. When bypass_iommu property is not set for a host bridge,
    18:  the attached devices will go through vIOMMU by default.
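The matched document describes QEMU's per-host-bridge bypass_iommu property, which lets translated and untranslated devices coexist in one guest. A hedged usage sketch (assuming the bypass_iommu property of pxb-pcie, present in current QEMU; bus numbers and ids below are arbitrary):

    # Devices under the extra root bridge pci.10 skip translation;
    # devices left on the default pcie.0 bus still go through the vIOMMU,
    # matching the "property not set => translated" default quoted above.
    qemu-system-x86_64 -machine q35 \
        -device intel-iommu \
        -device pxb-pcie,bus_nr=0x10,id=pci.10,bus=pcie.0,bypass_iommu=true \
        ...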
/openbmc/qemu/hw/virtio/virtio-iommu.c
    86:  VirtIOIOMMU *s = sdev->viommu;  [in virtio_iommu_device_bypassed()]
   393:  VirtIOIOMMU *s = sdev->viommu;  [in add_prop_resv_regions()]
   426:  sdev->viommu = s;  [in virtio_iommu_find_add_as()]
   493:  get_host_iommu_device(VirtIOIOMMU *viommu, PCIBus *bus, int devfn) {  [argument]
   499:  return g_hash_table_lookup(viommu->host_iommu_devices, &key);  [in get_host_iommu_device()]
   592:  static bool check_page_size_mask(VirtIOIOMMU *viommu, uint64_t new_mask,  [argument]
   595:  uint64_t cur_mask = viommu->config.page_size_mask;  [in check_page_size_mask()]
   608:  if (viommu->granule_frozen) {  [in check_page_size_mask()]
   625:  VirtIOIOMMU *viommu = opaque;  [local, in virtio_iommu_set_iommu_device()]
   632:  if (get_host_iommu_device(viommu, bus, devfn)) {  [in virtio_iommu_set_iommu_device()]
   [all …]
/openbmc/qemu/hw/virtio/vhost-vdpa.c
    71:  * While using vIOMMU, sometimes the section will be larger than iova_max,  [in vhost_vdpa_listener_skipped_section()]
/openbmc/qemu/linux-headers/linux/iommufd.h
   470:  * @pt_id: The IOAS or HWPT or vIOMMU to connect this HWPT to
   489:  * A user-managed nested HWPT will be created from a given vIOMMU (wrapping a
   494:  * via @dev_id and the vIOMMU via @pt_id must be associated to the same IOMMU
   783:  * Supported command list only when passing in a vIOMMU via @hwpt_id:
   802:  * @hwpt_id: ID of a nested HWPT or a vIOMMU, for cache invalidation
   813:  * Invalidate iommu cache for user-managed page table or vIOMMU. Modifications
   816:  * cache can be flushed if a vIOMMU is passed in via the @hwpt_id field.
   956:  * to the vIOMMU, such as:
   978:  * @viommu_id: vIOMMU ID to associate with the virtual device
   979:  * @dev_id: The physical device to allocate a virtual instance on the vIOMMU
   [all …]
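The matched header comments document the iommufd uAPI objects these hits revolve around: a user-managed nested HWPT allocated against a vIOMMU, later usable as the @hwpt_id target of cache invalidations. A minimal userspace sketch, assuming the iommufd.h layout in recent kernels; the fd and the dev_id/viommu_id values are placeholders obtained from earlier VFIO/iommufd binding steps, and the driver-specific page-table data is elided:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /* Allocate a user-managed nested HWPT from a vIOMMU (@pt_id may name
     * an IOAS, a parent HWPT, or a vIOMMU, per the comment quoted above). */
    static int alloc_nested_hwpt(int iommufd, uint32_t dev_id, uint32_t viommu_id,
                                 uint32_t data_type, void *data, uint32_t data_len)
    {
            struct iommu_hwpt_alloc cmd = {
                    .size = sizeof(cmd),
                    .dev_id = dev_id,
                    .pt_id = viommu_id,
                    .data_type = data_type,
                    .data_len = data_len,
                    .data_uptr = (uintptr_t)data,
            };

            if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
                    return -1;
            /* cmd.out_hwpt_id can later be passed as @hwpt_id in
             * IOMMU_HWPT_INVALIDATE cache-invalidation requests. */
            return cmd.out_hwpt_id;
    }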
/openbmc/qemu/include/hw/i386/x86-iommu.h
    64:  OnOffAuto intr_supported; /* Whether vIOMMU supports IR */
    65:  bool dt_supported; /* Whether vIOMMU supports DT */
    66:  bool pt_supported; /* Whether vIOMMU supports pass-through */
/openbmc/qemu/include/hw/i386/intel_iommu.h
   127:  * knowledge of existing state vIOMMU doesn't know whether it should
   135:  * invalidate the whole domain. If the vIOMMU directly notifies the
   146:  * vIOMMU needs to make sure the shadow page table is always in sync
/openbmc/linux/Documentation/devicetree/bindings/virtio/mmio.yaml
    56:  iommus = <&viommu 23>;
    59:  viommu: iommu@3100 {
/openbmc/linux/Documentation/userspace-api/iommu.rst
    14:  guest IO virtual address (IOVA), wherein the vIOMMU implementation
    84:  While launching a guest with vIOMMU, it is strongly advised to check
    86:  vIOMMU operation, such as cache invalidation failures cannot be nicely
/openbmc/qemu/docs/devel/migration/vfio.rst
   153:  System memory dirty pages tracking when vIOMMU is enabled
   156:  With vIOMMU, an IO virtual address range can get unmapped while in pre-copy
   161:  mapped ranges. If device dirty tracking is enabled with vIOMMU, live migration
/openbmc/qemu/include/hw/pci/pci.h
   401:  * @set_iommu_device: attach a HostIOMMUDevice to a vIOMMU
   403:  * Optional callback, if not implemented in vIOMMU, then vIOMMU can't
   421:  * @unset_iommu_device: detach a HostIOMMUDevice from a vIOMMU
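The matched comments describe the optional PCIIOMMUOps callbacks a vIOMMU model implements so VFIO can hand it a HostIOMMUDevice. A hedged sketch of the usual wiring; the my_-prefixed names are illustrative, and the callback signatures should be checked against the QEMU tree in use:

    static bool my_set_iommu_device(PCIBus *bus, void *opaque, int devfn,
                                    HostIOMMUDevice *hiod, Error **errp)
    {
        MyVIOMMU *s = opaque;

        /* Cache the host-side handle so the vIOMMU can later query host
         * capabilities (page size mask, nesting support, ...). */
        return my_viommu_record_hiod(s, bus, devfn, hiod, errp);
    }

    static void my_unset_iommu_device(PCIBus *bus, void *opaque, int devfn)
    {
        my_viommu_forget_hiod(opaque, bus, devfn);
    }

    static const PCIIOMMUOps my_iommu_ops = {
        .get_address_space  = my_find_add_as,
        .set_iommu_device   = my_set_iommu_device,
        .unset_iommu_device = my_unset_iommu_device,
    };

    /* registered once at realize time: */
    pci_setup_iommu(bus, &my_iommu_ops, s);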
/openbmc/qemu/include/hw/virtio/virtio-iommu.h
    37:  void *viommu;  [member]
/openbmc/qemu/hw/acpi/viot.c
   100:  /* Build the list of PCI ranges that this viommu manages */  [in build_viot()]
/openbmc/qemu/backends/iommufd.c
   191:  * backend which allows it. vIOMMU may trigger a lot of  [in iommufd_backend_unmap_dma()]
/openbmc/linux/drivers/hv/hv_common.c
   489:  * Hyper-V does not offer a vIOMMU in the guest  [in hv_setup_dma_ops()]
/openbmc/qemu/include/exec/memory.h
   137:  * (1) When the device needs accurate synchronizations of the vIOMMU page
   148:  * vIOMMU page tables, it needs to register only with UNMAP or
   153:  * from the vIOMMU through a protocol similar to ATS (Address
   156:  * Note that in this mode the vIOMMU will not maintain a shadowed
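The matched comment block distinguishes consumers that shadow the vIOMMU page tables (register for MAP and UNMAP) from those that only cache translations (UNMAP or device-IOTLB events suffice). A hedged sketch of the first case, using the notifier API from this header; the callback body and error policy are illustrative:

    static void my_shadow_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
    {
        if (iotlb->perm == IOMMU_NONE) {
            /* UNMAP event: drop the shadow mapping covering
             * [iotlb->iova, iotlb->iova + iotlb->addr_mask]. */
        } else {
            /* MAP event: install a shadow mapping to
             * iotlb->translated_addr with iotlb->perm. */
        }
    }

    static IOMMUNotifier my_notifier;

    static void my_register(IOMMUMemoryRegion *iommu_mr)
    {
        iommu_notifier_init(&my_notifier, my_shadow_notify,
                            IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP,
                            0, HWADDR_MAX, 0 /* iommu_idx */);
        memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
                                              &my_notifier, &error_fatal);
    }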
/openbmc/qemu/hw/vfio/migration.c
  1117:  "with vIOMMU enabled", vbasedev->name);  [in vfio_migration_realize()]
/openbmc/qemu/hw/vfio/container.c
   150:  * unmap request comes via vIOMMU support which also makes it unlikely  [in vfio_legacy_dma_unmap()]
/openbmc/qemu/hw/vfio/pci.c
  3123:  error_prepend(errp, "Failed to set vIOMMU: ");  [in vfio_realize()]
/openbmc/linux/drivers/iommu/amd/iommu.c
  1430:  * When NpCache is on, we infer that we run in a VM and use a vIOMMU.  [in domain_flush_pages()]
/openbmc/qemu/qemu-options.hx
  1200:  IOTLB invalidation from the guest IOMMU driver to the vIOMMU device in
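The matched option text concerns how guest IOTLB invalidations reach the vIOMMU device model. A hedged command-line sketch (option names as in current QEMU: caching-mode=on makes the guest driver send explicit invalidations that QEMU can trap, device-iotlb=on enables the ATS-style device-IOTLB path, and interrupt remapping requires a split irqchip):

    qemu-system-x86_64 -machine q35,kernel-irqchip=split \
        -device intel-iommu,intremap=on,caching-mode=on,device-iotlb=on \
        ...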