/openbmc/linux/drivers/media/platform/nvidia/tegra-vde/ |
iommu.c |
      9  #include <linux/iova.h>
  in tegra_vde_iommu_map():
     21  struct iova **iovap,
     24  struct iova *iova;  [local]
     30  size = iova_align(&vde->iova, size);
     31  shift = iova_shift(&vde->iova);
     33  iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);
     34  if (!iova)
     37  addr = iova_dma_addr(&vde->iova, iova);
     42  __free_iova(&vde->iova, iova);
     46  *iovap = iova;
    [all …]
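The matches above show the standard IOVA-allocator pattern: round the request up to the domain granule with iova_align(), convert bytes to page-frame counts for alloc_iova(), then turn the returned node back into a bus address with iova_dma_addr(). Below is a minimal standalone sketch of that byte/page arithmetic, assuming a 4 KiB granule (the driver gets the real shift from iova_shift(&vde->iova)):

    #include <stdint.h>
    #include <stdio.h>

    #define GRANULE_SHIFT 12                      /* assumed 4 KiB IOVA granule */
    #define GRANULE_SIZE  (1UL << GRANULE_SHIFT)

    /* Round a byte count up to a whole number of granules (iova_align()). */
    static size_t iova_align_sketch(size_t size)
    {
        return (size + GRANULE_SIZE - 1) & ~(GRANULE_SIZE - 1);
    }

    int main(void)
    {
        size_t size = 6000;                       /* arbitrary buffer size */
        size_t aligned = iova_align_sketch(size);

        /* alloc_iova() wants a page-frame count, hence the >> shift. */
        printf("bytes=%zu aligned=%zu pfns=%zu\n",
               size, aligned, aligned >> GRANULE_SHIFT);
        return 0;                                 /* bytes=6000 aligned=8192 pfns=2 */
    }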
|
dmabuf-cache.c |
      9  #include <linux/iova.h>
     28  struct iova *iova;  [member]
  in tegra_vde_release_entry():
     39  tegra_vde_iommu_unmap(entry->vde, entry->iova);
  in tegra_vde_dmabuf_cache_map():
     73  struct iova *iova;  [local]
     91  *addrp = iova_dma_addr(&vde->iova, entry->iova);
    125  err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
    129  *addrp = iova_dma_addr(&vde->iova, iova);
    132  iova = NULL;
    139  entry->iova = iova;
|
/openbmc/linux/drivers/iommu/ |
iova.c |
      8  #include <linux/iova.h>
     18  #define IOVA_RANGE_CACHE_MAX_SIZE 6  /* log of max cached IOVA range size (in pages) */
  in to_iova():
     46  static struct iova *to_iova(struct rb_node *node)
     48  return rb_entry(node, struct iova, node);
  in init_iova_domain():
     56  * IOVA granularity will normally be equal to the smallest
  in __cached_rbnode_insert_update():
     86  __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
  in __cached_rbnode_delete_update():
     95  __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
     97  struct iova *cached_iova;
  in iova_find_limit():
    118  * enough to the highest-allocated IOVA that starting the allocation
    150  /* Insert the iova into domain rbtree by holding writer lock */
    [all …]
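to_iova() above is the classic container_of pattern: it recovers the enclosing struct iova from a pointer to its embedded rb_node, which is how the allocator keeps its nodes inside an rbtree without separate allocations. A standalone restatement using offsetof (types here are simplified stand-ins, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct rb_node_sketch { struct rb_node_sketch *left, *right; };

    struct iova_sketch {
        unsigned long pfn_lo, pfn_hi;
        struct rb_node_sketch node;    /* embedded tree linkage */
    };

    /* Walk back from the embedded node to the containing struct. */
    static struct iova_sketch *to_iova_sketch(struct rb_node_sketch *n)
    {
        return (struct iova_sketch *)((char *)n -
                                      offsetof(struct iova_sketch, node));
    }

    int main(void)
    {
        struct iova_sketch iova = { .pfn_lo = 1, .pfn_hi = 4 };

        printf("%lu\n", to_iova_sketch(&iova.node)->pfn_hi);  /* prints 4 */
        return 0;
    }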
|
tegra-gart.c |
     60  #define for_each_gart_pte(gart, iova) \
     61      for (iova = gart->iovmm_base; \
     62           iova < gart->iovmm_end; \
     63           iova += GART_PAGE_SIZE)
  in gart_set_pte():
     66  unsigned long iova, unsigned long pte)
     68  writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
  in gart_read_pte():
     73  unsigned long iova)
     77  writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
  in do_gart_setup():
     85  unsigned long iova;  [local]
     87  for_each_gart_pte(gart, iova)
    [all …]
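Both accessors use an index/data register pair: the IOVA is first written to an address register to select a GART entry, then the PTE moves through the data register. A compile-only sketch of the pattern; the register offsets and MMIO helpers below are illustrative stand-ins, not the real Tegra definitions:

    #include <stdint.h>

    #define GART_ENTRY_ADDR_X 0x28     /* hypothetical register offsets */
    #define GART_ENTRY_DATA_X 0x2c

    static inline void mmio_write32(volatile void *base, uint32_t off,
                                    uint32_t val)
    {
        *(volatile uint32_t *)((volatile char *)base + off) = val;
    }

    static inline uint32_t mmio_read32(volatile void *base, uint32_t off)
    {
        return *(volatile uint32_t *)((volatile char *)base + off);
    }

    /* Select the entry by IOVA, then write its PTE through the data port. */
    static void gart_set_pte_sketch(volatile void *regs, unsigned long iova,
                                    uint32_t pte)
    {
        mmio_write32(regs, GART_ENTRY_ADDR_X, (uint32_t)iova);
        mmio_write32(regs, GART_ENTRY_DATA_X, pte);
    }

    /* Same selection step, then read the PTE back. */
    static uint32_t gart_read_pte_sketch(volatile void *regs,
                                         unsigned long iova)
    {
        mmio_write32(regs, GART_ENTRY_ADDR_X, (uint32_t)iova);
        return mmio_read32(regs, GART_ENTRY_DATA_X);
    }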
|
io-pgtable-arm-v7s.c |
  in arm_v7s_init_pte():
    432  unsigned long iova, phys_addr_t paddr, int prot,
    448  tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
    449  if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
  in __arm_v7s_map():
    497  static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
    506  ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);
    510  return arm_v7s_init_pte(data, iova, paddr, prot,
    541  return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
  in arm_v7s_map_pages():
    544  static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
    551  if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
    559  ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
    [all …]
|
/openbmc/linux/drivers/vfio/ |
iova_bitmap.c |
     14  * struct iova_bitmap_map - A bitmap representing an IOVA range
     20  * total IOVA range. The struct iova_bitmap_map, though, represents the
     21  * subset of said IOVA space that is pinned by its parent structure (struct
     26  * records the IOVA *range* in the bitmap by setting the corresponding
     29  * The bitmap is an array of u64 whereas each bit represents an IOVA of
     32  *   data[(iova / page_size) / 64] & (1ULL << (iova % 64))
     35  /* base IOVA representing bit 0 of the first page */
     36  unsigned long iova;  [member]
     52  * struct iova_bitmap - The IOVA bitmap object
     56  * Abstracts the pinning work and iterates in IOVA ranges.
    [all …]
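A worked example of the indexing the quoted comment describes. Note that for the word and bit computations to be consistent, the bit selector has to operate on the page index, i.e. (iova / page_size) % 64; this standalone sketch assumes 4 KiB pages and a base IOVA of 0:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t data[2] = { 0, 0 };
        uint64_t page_size = 4096;
        uint64_t iova = 70 * page_size;          /* page index 70 */

        uint64_t page = iova / page_size;        /* which page */
        uint64_t word = page / 64;               /* which u64 in the array */
        uint64_t bit  = page % 64;               /* which bit in that u64 */

        data[word] |= (1ULL << bit);             /* record "page 70 is dirty" */

        printf("word=%llu bit=%llu data[1]=%#llx\n",
               (unsigned long long)word, (unsigned long long)bit,
               (unsigned long long)data[1]);     /* word=1 bit=6 data[1]=0x40 */
        return 0;
    }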
|
vfio_iommu_type1.c |
     17  * the IOVA range that can be mapped. The Type1 IOMMU is currently
     90  dma_addr_t iova;  /* Device address */  [member]
    129  dma_addr_t iova;  /* Device address */  [member]
    136  dma_addr_t iova;  [member]
  in vfio_find_dma():
    173  if (start + size <= dma->iova)
    175  else if (start >= dma->iova + dma->size)
  in vfio_find_dma_first_node():
    194  if (start < dma->iova + dma->size) {
    197  if (start >= dma->iova)
    204  if (res && size && dma_res->iova >= start + size)
  in vfio_link_dma():
    218  if (new->iova + new->size <= dma->iova)
    [all …]
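vfio_find_dma() (lines 173 and 175 above) is a textbook interval lookup: descend the tree, going left when the query range ends before the node's range and right when it starts after it; anything else overlaps. A simplified standalone version over a plain binary tree (the kernel uses an rbtree):

    #include <stddef.h>
    #include <stdint.h>

    struct dma_node {
        uint64_t iova;
        uint64_t size;
        struct dma_node *left, *right;
    };

    static struct dma_node *find_dma_sketch(struct dma_node *node,
                                            uint64_t start, uint64_t size)
    {
        while (node) {
            if (start + size <= node->iova)
                node = node->left;       /* query entirely below this node */
            else if (start >= node->iova + node->size)
                node = node->right;      /* query entirely above this node */
            else
                return node;             /* the ranges overlap */
        }
        return NULL;
    }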
|
/openbmc/linux/drivers/fpga/ |
dfl-afu-dma-region.c |
    118  * @iova: address of the dma memory area
    121  * Compare the dma memory area defined by @iova and @size with the given dma region.
  in dma_region_check_iova():
    125  u64 iova, u64 size)
    127  if (!size && region->iova != iova)
    130  return (region->iova <= iova) &&
    131         (region->length + region->iova >= iova + size);
  in afu_dma_region_add():
    149  dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
    150          (unsigned long long)region->iova);
    161  if (dma_region_check_iova(this, region->iova, region->length))
    164  if (region->iova < this->iova)
    [all …]
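dma_region_check_iova() packs two behaviors into one predicate: with @size == 0 it demands an exact match on the region start, otherwise it checks full containment of [iova, iova + size) in the region. A standalone restatement with illustrative values (note that, like the kernel version, it assumes iova + size does not wrap):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct dma_region_sketch {
        uint64_t iova;
        uint64_t length;
    };

    /* size == 0 means "exact start lookup"; otherwise the query range
     * must lie entirely inside the region. */
    static bool region_check_iova(const struct dma_region_sketch *r,
                                  uint64_t iova, uint64_t size)
    {
        if (!size && r->iova != iova)
            return false;

        return (r->iova <= iova) &&
               (r->length + r->iova >= iova + size);
    }

    int main(void)
    {
        struct dma_region_sketch r = { .iova = 0x1000, .length = 0x4000 };

        assert(region_check_iova(&r, 0x2000, 0x1000));   /* fully inside */
        assert(!region_check_iova(&r, 0x4800, 0x1000));  /* runs past the end */
        assert(!region_check_iova(&r, 0x2000, 0));       /* not the exact start */
        return 0;
    }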
|
/openbmc/linux/include/linux/ |
iova.h |
     17  /* iova structure */
     18  struct iova {
     27  /* holds all the iova translations for a domain */
     30  struct rb_root rbroot;  /* iova domain rbtree root */
     37  struct iova anchor;  /* rbtree lookup anchor */
  in iova_size():
     43  static inline unsigned long iova_size(struct iova *iova)
     45  return iova->pfn_hi - iova->pfn_lo + 1;
  in iova_offset():
     58  static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
     60  return iova & iova_mask(iovad);
  in iova_dma_addr():
     68  static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
    [all …]
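These helpers are thin page-frame arithmetic: pfn_lo/pfn_hi are inclusive, so iova_size() is hi - lo + 1 pages, iova_dma_addr() shifts pfn_lo back into bytes, and iova_offset() masks off everything above the granule. A worked standalone example, assuming a 4 KiB granule:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int shift = 12;                   /* assumed granule: 4 KiB */
        uint64_t pfn_lo = 0x100, pfn_hi = 0x103;   /* a 4-page allocation */

        uint64_t size_pages = pfn_hi - pfn_lo + 1;              /* iova_size() */
        uint64_t dma_addr   = pfn_lo << shift;                  /* iova_dma_addr() */
        uint64_t offset     = 0x100123 & ((1ULL << shift) - 1); /* iova_offset() */

        printf("pages=%llu addr=%#llx off=%#llx\n",
               (unsigned long long)size_pages, (unsigned long long)dma_addr,
               (unsigned long long)offset);  /* pages=4 addr=0x100000 off=0x123 */
        return 0;
    }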
|
/openbmc/linux/include/trace/events/ |
iommu.h |
     81  TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
     83  TP_ARGS(iova, paddr, size),
     86  __field(u64, iova)
     92  __entry->iova = iova;
     97  TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu",
     98            __entry->iova, __entry->iova + __entry->size, __entry->paddr,
    105  TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
    107  TP_ARGS(iova, size, unmapped_size),
    110  __field(u64, iova)
    116  __entry->iova = iova;
    [all …]
|
/openbmc/qemu/hw/virtio/ |
vhost-iova-tree.c |
      2  * vhost software live migration iova tree
     11  #include "qemu/iova-tree.h"
     12  #include "vhost-iova-tree.h"
     18  * - Translate iova address
     19  * - Reverse translate iova address (from translated to iova)
     20  * - Allocate IOVA regions for translated range (linear operation)
     23  /* First addressable iova address in the device */
     26  /* Last addressable iova address in the device */
     29  /* IOVA address to qemu memory maps. */
     34  * Create a new IOVA tree
    [all …]
|
/openbmc/qemu/include/qemu/ |
iova-tree.h |
      2  * A very simplified iova tree implementation based on GTree.
     15  * Currently the iova tree will only allow keeping ranges
     22  * protections. Callers of the iova tree should be responsible
     31  #define IOVA_ERR_OVERLAP (-2)  /* IOVA range overlapped */
     36  hwaddr iova;  [member]
     46  * Create a new iova tree.
     55  * @tree: the iova tree to insert
     58  * Insert an iova range into the tree. If there is an overlapped
     68  * @tree: the iova tree to remove the range from
     81  * @tree: the iova tree to search from
    [all …]
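A hedged usage sketch of the API this header declares; it builds only inside the QEMU tree, and the DMAMap field semantics (size is the inclusive span, i.e. last - iova) are taken from the declarations excerpted above:

    #include "qemu/osdep.h"
    #include "qemu/iova-tree.h"

    static void iova_tree_example(void)
    {
        IOVATree *tree = iova_tree_new();
        DMAMap map = {
            .iova = 0x100000,
            .size = 0xfff,                  /* covers [0x100000, 0x100fff] */
            .translated_addr = 0x40000000,
            .perm = IOMMU_RW,
        };

        /* Insertion fails with IOVA_ERR_OVERLAP if the range intersects
         * an existing entry. */
        if (iova_tree_insert(tree, &map) == IOVA_ERR_OVERLAP) {
            /* handle the collision */
        }

        /* Lookup is also overlap-based: any map touching the needle's
         * range is a hit. */
        DMAMap needle = { .iova = 0x100800, .size = 0 };
        const DMAMap *hit = iova_tree_find(tree, &needle);

        g_assert(hit && hit->translated_addr == 0x40000000);
        iova_tree_destroy(tree);
    }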
|
/openbmc/linux/drivers/staging/media/ipu3/ |
ipu3-dmamap.c |
  in imgu_dmamap_alloc():
    102  struct iova *iova;  [local]
    107  iova = alloc_iova(&imgu->iova_domain, size >> shift,
    109  if (!iova)
    117  iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
    133  map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
    135  dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
    142  imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
    146  __free_iova(&imgu->iova_domain, iova);
  in imgu_dmamap_unmap():
    153  struct iova *iova;  [local]
    155  iova = find_iova(&imgu->iova_domain,
    [all …]
|
ipu3-mmu.c |
    149  * address_to_pte_idx - split IOVA into L1 and L2 page table indices
    150  * @iova: IOVA to split.
  in address_to_pte_idx():
    154  static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
    157  iova >>= IPU3_PAGE_SHIFT;
    160  *l2pt_idx = iova & IPU3_L2PT_MASK;
    162  iova >>= IPU3_L2PT_SHIFT;
    165  *l1pt_idx = iova & IPU3_L1PT_MASK;
  in __imgu_mmu_map():
    210  static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
    220  address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
    244  * @iova: the virtual address
    [all …]
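address_to_pte_idx() peels the IOVA apart from the bottom up: drop the page offset, take the low bits as the L2 index, then the next bits as the L1 index. A standalone worked example with assumed widths (4 KiB pages, 10-bit L2 index) that mirror the structure of the code, though the exact IPU3 constants are not reproduced here:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT_X 12                         /* assumed 4 KiB pages */
    #define L2PT_SHIFT_X 10                         /* assumed 10-bit L2 index */
    #define L2PT_MASK_X  ((1u << L2PT_SHIFT_X) - 1)
    #define L1PT_MASK_X  ((1u << L2PT_SHIFT_X) - 1) /* assumed same width */

    int main(void)
    {
        unsigned long iova = 0x01234000;
        uint32_t l1, l2;

        unsigned long pfn = iova >> PAGE_SHIFT_X;   /* drop the page offset */
        l2 = pfn & L2PT_MASK_X;                     /* low bits: L2 index */
        l1 = (pfn >> L2PT_SHIFT_X) & L1PT_MASK_X;   /* next bits: L1 index */

        printf("iova=%#lx -> l1=%u l2=%#x\n", iova, l1, l2);
        /* 0x01234000 -> pfn 0x1234 -> l1=4 l2=0x234 */
        return 0;
    }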
|
/openbmc/linux/include/uapi/linux/ |
vduse.h |
     61  * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last]
     63  * @start: start of the IOVA region
     64  * @last: last of the IOVA region
     65  * @perm: access permission of the IOVA region
     67  * Structure used by the VDUSE_IOTLB_GET_FD ioctl to find an overlapping IOVA region.
     80  * Find the first IOVA region that overlaps with the range [start, last]
     82  * IOVA region doesn't exist. Caller should set the start and last fields.
    214  * struct vduse_iova_umem - userspace memory configuration for one IOVA region
    216  * @iova: start of the IOVA region
    217  * @size: size of the IOVA region
    [all …]
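A hedged sketch of driving the lookup described above from userspace: fill in start/last, and on success the ioctl returns a file descriptor backing the first overlapping region while rewriting the entry's fields. Error handling is minimal and dev_fd is assumed to be an open VDUSE character device:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/vduse.h>

    /* Look up the first IOVA region overlapping [start, last]. */
    static int find_iova_region(int dev_fd, __u64 start, __u64 last)
    {
        struct vduse_iotlb_entry entry = {
            .start = start,
            .last  = last,
        };
        int fd = ioctl(dev_fd, VDUSE_IOTLB_GET_FD, &entry);

        if (fd < 0) {
            perror("VDUSE_IOTLB_GET_FD");   /* no overlapping region */
            return -1;
        }

        /* entry now describes the region; fd can be mmap()ed at
         * entry.offset to access it. */
        printf("region [%llu, %llu] perm=%u\n",
               (unsigned long long)entry.start,
               (unsigned long long)entry.last, entry.perm);
        return fd;
    }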
|
iommufd.h |
     31  * - ENOENT: An ID or IOVA provided does not exist.
     71  * Allocate an IO Address Space (IOAS) which holds an IO Virtual Address (IOVA)
     83  * @start: First IOVA
     84  * @last: Inclusive last IOVA
     86  * An interval in IOVA space.
    100  * @out_iova_alignment: Minimum alignment required for mapping IOVA
    102  * Query an IOAS for ranges of allowed IOVAs. Mapping IOVA outside these ranges
    118  * out_iova_alignment returns the minimum IOVA alignment that can be given
    119  * to IOMMU_IOAS_MAP/COPY. IOVAs must satisfy::
    124  * out_iova_alignment can be 1 indicating any IOVA is allowed. It cannot
    [all …]
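The alignment rule spelled out at lines 118-124 reduces to a two-part divisibility check on the start and end of the mapping. Restated as a standalone predicate with illustrative values:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Both the first IOVA and the end of the range must be multiples of
     * out_iova_alignment. */
    static bool iova_aligned(uint64_t iova, uint64_t length, uint64_t align)
    {
        return (iova % align) == 0 && ((iova + length) % align) == 0;
    }

    int main(void)
    {
        assert(iova_aligned(0x10000, 0x4000, 0x1000));   /* 4 KiB-aligned map */
        assert(!iova_aligned(0x10800, 0x4000, 0x1000));  /* misaligned start */
        return 0;
    }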
|
/openbmc/qemu/linux-headers/linux/ |
vduse.h |
     61  * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last]
     63  * @start: start of the IOVA region
     64  * @last: last of the IOVA region
     65  * @perm: access permission of the IOVA region
     67  * Structure used by the VDUSE_IOTLB_GET_FD ioctl to find an overlapping IOVA region.
     80  * Find the first IOVA region that overlaps with the range [start, last]
     82  * IOVA region doesn't exist. Caller should set the start and last fields.
    214  * struct vduse_iova_umem - userspace memory configuration for one IOVA region
    216  * @iova: start of the IOVA region
    217  * @size: size of the IOVA region
    [all …]
|
/openbmc/linux/drivers/infiniband/sw/rxe/ |
rxe_mr.c |
  in mr_check_range():
     27  int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
     35  if (iova < mr->ibmr.iova ||
     36      iova + length > mr->ibmr.iova + mr->ibmr.length) {
     37  rxe_dbg_mr(mr, "iova/length out of range\n");
  in rxe_mr_iova_to_index():
     75  static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
     77  return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
  in rxe_mr_iova_to_page_offset():
     80  static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
     82  return iova & (mr_page_size(mr) - 1);
    129  int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,  [in rxe_mr_init_user()]
    240  mr->page_offset = mr->ibmr.iova & (page_size - 1);  [in rxe_map_mr_sg()]
    [all …]
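The two helpers above convert an absolute IOVA into the MR's own coordinates: a page index relative to the MR's starting IOVA, and a byte offset within the page. A standalone worked example, assuming a 4 KiB MR page size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 12;                /* assumed 4 KiB pages */
        uint64_t page_size = 1ULL << page_shift;
        uint64_t mr_iova = 0x7f0000001000;           /* MR registered here */
        uint64_t iova    = 0x7f0000003234;           /* address inside the MR */

        /* Page index relative to the MR start (rxe_mr_iova_to_index()). */
        uint64_t index  = (iova >> page_shift) - (mr_iova >> page_shift);
        /* Byte offset within the page (rxe_mr_iova_to_page_offset()). */
        uint64_t offset = iova & (page_size - 1);

        printf("index=%llu offset=%#llx\n",
               (unsigned long long)index, (unsigned long long)offset);
        /* index=2 offset=0x234 */
        return 0;
    }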
|
/openbmc/linux/drivers/vdpa/vdpa_user/ |
iova_domain.c |
  in vduse_domain_map_bounce_page():
    104  u64 iova, u64 size, u64 paddr)
    107  u64 last = iova + size - 1;
    109  while (iova <= last) {
    110      map = &domain->bounce_maps[iova >> PAGE_SHIFT];
    118      iova += PAGE_SIZE;
  in vduse_domain_unmap_bounce_page():
    124  u64 iova, u64 size)
    127  u64 last = iova + size - 1;
    129  while (iova <= last) {
    130      map = &domain->bounce_maps[iova >> PAGE_SHIFT];
    132      iova += PAGE_SIZE;
    [all …]
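Both functions above walk the range one page at a time, indexing the bounce_maps array by page frame number; computing last as iova + size - 1 keeps the loop correct for sizes that are not page multiples. A standalone trace of that walk, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT_X 12
    #define PAGE_SIZE_X  (1UL << PAGE_SHIFT_X)

    int main(void)
    {
        uint64_t iova = 3 * PAGE_SIZE_X;
        uint64_t size = 2 * PAGE_SIZE_X + 16;    /* spans three pages */
        uint64_t last = iova + size - 1;         /* inclusive end */

        while (iova <= last) {
            printf("touch bounce_maps[%llu]\n",
                   (unsigned long long)(iova >> PAGE_SHIFT_X));
            iova += PAGE_SIZE_X;
        }
        return 0;   /* touches entries 3, 4 and 5 */
    }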
|
/openbmc/linux/drivers/iommu/iommufd/ |
selftest.c |
     42  * Syzkaller has trouble randomizing the correct iova to use since it is linked
     44  * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
  in __iommufd_test_syz_conv_iova():
     48  u64 *iova)
     54  struct syz_layout *syz = (void *)iova;
  in iommufd_test_syz_conv_iova():
     73  u64 *iova)
     82  ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
  in iommufd_test_syz_conv_iova_id():
     88  unsigned int ioas_id, u64 *iova, u32 *flags)
     99  *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
  in mock_domain_map_pages():
    194  unsigned long iova, phys_addr_t paddr,
    201  unsigned long start_iova = iova;
    [all …]
|
/openbmc/qemu/hw/arm/ |
smmu-common.c |
  in smmu_iotlb_key_hash():
     42  b += extract64(key->iova, 0, 32);
     43  c += extract64(key->iova, 32, 32);
  in smmu_iotlb_key_equal():
     55  return (k1->asid == k2->asid) && (k1->iova == k2->iova) &&
  in smmu_get_iotlb_key():
     60  SMMUIOTLBKey smmu_get_iotlb_key(int asid, int vmid, uint64_t iova,
     63  SMMUIOTLBKey key = {.asid = asid, .vmid = vmid, .iova = iova,
  in smmu_iotlb_lookup_all_levels():
     72  hwaddr iova)
     86  iova & ~mask, tg, level);
  in smmu_iotlb_lookup():
    101  * @iova: IOVA address to lookup
    106  * the IOVA granule.
    109  SMMUTransTableInfo *tt, hwaddr iova)
    [all …]
|
/openbmc/linux/drivers/gpu/drm/msm/ |
msm_iommu.c |
  in calc_pgsize():
     37  unsigned long iova, phys_addr_t paddr,
     43  unsigned long addr_merge = paddr | iova;
     73  if ((iova ^ paddr) & (pgsize_next - 1))
  in msm_iommu_pagetable_unmap():
     91  static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
    100  pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
    102  unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
    106  iova += unmapped;
  in msm_iommu_pagetable_map():
    115  static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
    121  u64 addr = iova;
    145  msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
    [all …]
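calc_pgsize() picks the largest supported page size compatible with the mapping, and addr_merge = paddr | iova is the heart of it: the lowest set bit of the OR bounds the alignment both addresses share. A standalone sketch of the selection; GCC/Clang builtins stand in for the kernel's __ffs/__fls, the page-size bitmap is an assumed example, and size must be nonzero with at least one candidate page size surviving:

    #include <stdint.h>
    #include <stdio.h>

    /* Lowest/highest set bit helpers (inputs must be nonzero). */
    static unsigned ffs64(uint64_t x) { return (unsigned)__builtin_ctzll(x); }
    static unsigned fls64(uint64_t x) { return 63u - (unsigned)__builtin_clzll(x); }

    static uint64_t best_pgsize(uint64_t iova, uint64_t paddr, uint64_t size,
                                uint64_t pgsize_bitmap)
    {
        uint64_t addr_merge = iova | paddr;

        /* Keep page sizes that fit within 'size'... */
        uint64_t pgsizes = pgsize_bitmap & ((2ULL << fls64(size)) - 1);

        /* ...and to which both addresses are aligned. */
        if (addr_merge)
            pgsizes &= (2ULL << ffs64(addr_merge)) - 1;

        return 1ULL << fls64(pgsizes);   /* biggest survivor */
    }

    int main(void)
    {
        uint64_t bitmap = (1ULL << 12) | (1ULL << 21) | (1ULL << 30); /* 4K|2M|1G */

        printf("%#llx\n", (unsigned long long)
               best_pgsize(0x200000, 0x40200000, 0x400000, bitmap));
        return 0;   /* prints 0x200000: both addresses are 2M-aligned */
    }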
|
/openbmc/linux/drivers/gpu/drm/etnaviv/ |
etnaviv_mmu.c |
  in etnaviv_context_unmap():
     17  unsigned long iova, size_t size)
     22  if (!IS_ALIGNED(iova | size, pgsize)) {
     23      pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
     24             iova, size, pgsize);
     29  unmapped_page = context->global->ops->unmap(context, iova,
     34  iova += unmapped_page;
  in etnaviv_context_map():
     40  unsigned long iova, phys_addr_t paddr,
     43  unsigned long orig_iova = iova;
     48  if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
     49      pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
    [all …]
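The IS_ALIGNED(iova | size, pgsize) test above folds several alignment checks into one: OR-ing the operands preserves every low bit, so the result is pgsize-aligned only if each operand is. A standalone restatement (pgsize must be a power of two):

    #include <assert.h>
    #include <stdint.h>

    static int all_aligned(uint64_t iova, uint64_t size, uint64_t pgsize)
    {
        /* Any low bit set in any operand survives the OR and fails the mask. */
        return ((iova | size) & (pgsize - 1)) == 0;
    }

    int main(void)
    {
        assert(all_aligned(0x2000, 0x1000, 0x1000));
        assert(!all_aligned(0x2000, 0x1080, 0x1000));  /* size breaks alignment */
        return 0;
    }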
|
/openbmc/qemu/hw/vfio/ |
common.c |
  in vfio_iommu_map_notify():
    292  hwaddr iova = iotlb->iova + giommu->iommu_offset;  [local]
    298  iova, iova + iotlb->addr_mask);
    323  ret = vfio_container_dma_map(bcontainer, iova,
    329  bcontainer, iova,
    333  ret = vfio_container_dma_unmap(bcontainer, iova,
    338  bcontainer, iova,
  in vfio_ram_discard_notify_discard():
    354  const hwaddr iova = section->offset_within_address_space;  [local]
    358  ret = vfio_container_dma_unmap(bcontainer, iova, size, NULL);
  in vfio_ram_discard_notify_populate():
    373  hwaddr start, next, iova;  [local]
    385  iova = start - section->offset_within_region +
    [all …]
|
/openbmc/qemu/util/ |
iova-tree.c |
      2  * IOVA tree implementation based on GTree.
     13  #include "qemu/iova-tree.h"
     33  /* If found, we fill in the IOVA here */
     36  /* Whether we have found a valid IOVA */
  in iova_tree_compare():
     62  if (m1->iova > m2->iova + m2->size) {
     66  if (m1->iova + m1->size < m2->iova) {
  in iova_tree_insert():
    128  if (map->iova + map->size < map->iova || map->perm == IOMMU_NONE) {
    154  * Try to find an unallocated IOVA range between prev and this elements.
    169  * Note that this function assumes the last valid iova is HWADDR_MAX, but it
  in iova_tree_alloc_map_in_hole():
    177  if (this && this->iova + this->size < args->iova_begin) {
    [all …]
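iova_tree_compare() (lines 62 and 66 above) is what makes GTree do overlap queries: two ranges compare as "equal" whenever they intersect, so an ordinary tree lookup returns any overlapping entry. A standalone version of the comparator over a simple struct, with size inclusive as in DMAMap:

    #include <stdint.h>

    struct map_sketch {
        uint64_t iova;
        uint64_t size;   /* inclusive: last byte is iova + size */
    };

    static int iova_tree_compare_sketch(const struct map_sketch *m1,
                                        const struct map_sketch *m2)
    {
        if (m1->iova > m2->iova + m2->size)
            return 1;    /* m1 lies entirely above m2 */
        if (m1->iova + m1->size < m2->iova)
            return -1;   /* m1 lies entirely below m2 */
        return 0;        /* any overlap compares as "equal" */
    }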
|