
Searched refs:iotlb (Results 1 – 25 of 44) sorted by relevance


/openbmc/linux/drivers/vhost/
iotlb.c
34 iotlb->nmaps--; in vhost_iotlb_map_free()
75 if (iotlb->limit && in vhost_iotlb_add_range_ctx()
76 iotlb->nmaps == iotlb->limit && in vhost_iotlb_add_range_ctx()
93 iotlb->nmaps++; in vhost_iotlb_add_range_ctx()
138 iotlb->limit = limit; in vhost_iotlb_init()
139 iotlb->nmaps = 0; in vhost_iotlb_init()
154 struct vhost_iotlb *iotlb = kzalloc(sizeof(*iotlb), GFP_KERNEL); in vhost_iotlb_alloc() local
156 if (!iotlb) in vhost_iotlb_alloc()
161 return iotlb; in vhost_iotlb_alloc()
181 if (iotlb) { in vhost_iotlb_free()
[all …]
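
These hits are the core vhost IOTLB implementation: allocation, the optional per-instance map limit, and the nmaps counter. Below is a minimal kernel-context sketch of the lifecycle a caller drives against this code; the VHOST_MAP_RW permission flag and the exact trailing parameters of vhost_iotlb_add_range() are assumptions based on other in-tree users, since the lines above are truncated.

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/vhost_iotlb.h>

    /* Sketch: create an IOTLB, install one read/write mapping, tear it down. */
    static int demo_iotlb_lifecycle(u64 addr)
    {
        struct vhost_iotlb *iotlb;
        int ret;

        /* limit == 0 means no cap on the number of maps; no flags */
        iotlb = vhost_iotlb_alloc(0, 0);
        if (!iotlb)
            return -ENOMEM;

        /* Map GIOVA [0x1000, 0x1fff] to addr, read/write (perm flag assumed) */
        ret = vhost_iotlb_add_range(iotlb, 0x1000, 0x1fff, addr, VHOST_MAP_RW);
        if (!ret)
            vhost_iotlb_del_range(iotlb, 0x1000, 0x1fff);

        vhost_iotlb_free(iotlb);
        return ret;
    }
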
vdpa.c
41 struct vhost_iotlb iotlb; member
75 vhost_vdpa_as, iotlb); in iotlb_to_asid()
98 return &as->iotlb; in asid_to_iotlb()
116 vhost_iotlb_init(&as->iotlb, 0, 0); in vhost_vdpa_alloc_as()
886 u32 asid = iotlb_to_asid(iotlb); in vhost_vdpa_map()
915 struct vhost_iotlb *iotlb, in vhost_vdpa_unmap() argument
920 u32 asid = iotlb_to_asid(iotlb); in vhost_vdpa_unmap()
932 struct vhost_iotlb *iotlb, in vhost_vdpa_va_map() argument
1155 iotlb = &as->iotlb; in vhost_vdpa_process_iotlb_msg()
1157 iotlb = asid_to_iotlb(v, asid); in vhost_vdpa_process_iotlb_msg()
[all …]
vhost.c
389 vq->iotlb = NULL; in vhost_vq_reset()
518 dev->iotlb = NULL; in vhost_dev_init()
992 dev->iotlb = NULL; in vhost_dev_cleanup()
1097 if (!vq->iotlb) in vhost_copy_to_user()
1132 if (!vq->iotlb) in vhost_copy_from_user()
1599 if (vq->iotlb) in vq_access_ok()
1656 if (!vq->iotlb) in vq_meta_prefetch()
1684 if (vq->iotlb) in vq_log_used_access_ok()
2207 if (!vq->iotlb) in log_used()
2233 if (vq->iotlb) { in vhost_log_write()
[all …]
vhost.h
132 struct vhost_iotlb *iotlb; member
167 struct vhost_iotlb *iotlb; member
191 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
246 void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
Makefile
19 vhost_iotlb-y := iotlb.o
/openbmc/linux/include/linux/
vhost_iotlb.h
33 int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last,
35 int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last,
37 void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last);
39 void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
42 void vhost_iotlb_free(struct vhost_iotlb *iotlb);
43 void vhost_iotlb_reset(struct vhost_iotlb *iotlb);
46 vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last);
50 void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
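
vhost_iotlb.h is the interface behind most of the vhost and vdpa hits in this result set. A small sketch of walking the interval tree over a GIOVA range follows; it assumes a companion vhost_iotlb_itree_next() helper, which is not visible in the truncated prototypes above.

    #include <linux/types.h>
    #include <linux/vhost_iotlb.h>

    /* Sketch: count the maps that overlap [start, last]. */
    static unsigned int demo_count_overlaps(struct vhost_iotlb *iotlb,
                                            u64 start, u64 last)
    {
        struct vhost_iotlb_map *map;
        unsigned int n = 0;

        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last))
            n++;

        return n;
    }
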
vringh.h
51 struct vhost_iotlb *iotlb; member
294 void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
/openbmc/linux/drivers/vdpa/mlx5/core/
mr.c
224 struct vhost_iotlb *iotlb) in map_direct_mr() argument
305 struct vhost_iotlb *iotlb) in add_direct_chain() argument
327 err = map_direct_mr(mvdev, dmr, iotlb); in add_direct_chain()
537 struct vhost_iotlb *iotlb, in _mlx5_vdpa_create_cvq_mr() argument
543 return dup_iotlb(mvdev, iotlb); in _mlx5_vdpa_create_cvq_mr()
547 struct vhost_iotlb *iotlb, in _mlx5_vdpa_create_dvq_mr() argument
559 if (iotlb) in _mlx5_vdpa_create_dvq_mr()
560 err = create_user_mr(mvdev, iotlb); in _mlx5_vdpa_create_dvq_mr()
573 struct vhost_iotlb *iotlb, unsigned int asid) in _mlx5_vdpa_create_mr() argument
599 err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid); in mlx5_vdpa_create_mr()
[all …]
mlx5_vdpa.h
53 struct vhost_iotlb *iotlb; member
117 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
119 int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
resources.c
232 mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0); in init_ctrl_vq()
233 if (!mvdev->cvq.iotlb) in init_ctrl_vq()
237 vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock); in init_ctrl_vq()
244 vhost_iotlb_free(mvdev->cvq.iotlb); in cleanup_ctrl_vq()
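
init_ctrl_vq() and cleanup_ctrl_vq() above show the usual pairing of vhost_iotlb with vringh: the control virtqueue resolves guest addresses through its own IOTLB. A hedged sketch of that pattern outside the mlx5 driver; the struct layout and names here are illustrative only.

    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/vhost_iotlb.h>
    #include <linux/vringh.h>

    struct demo_cvq {
        struct vringh vring;
        struct vhost_iotlb *iotlb;
        spinlock_t iommu_lock;
    };

    static int demo_cvq_init(struct demo_cvq *cvq)
    {
        spin_lock_init(&cvq->iommu_lock);

        cvq->iotlb = vhost_iotlb_alloc(0, 0);
        if (!cvq->iotlb)
            return -ENOMEM;

        /* From now on vringh accesses translate through cvq->iotlb */
        vringh_set_iotlb(&cvq->vring, cvq->iotlb, &cvq->iommu_lock);
        return 0;
    }

    static void demo_cvq_cleanup(struct demo_cvq *cvq)
    {
        vhost_iotlb_free(cvq->iotlb);
    }
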
/openbmc/qemu/hw/virtio/
vhost-vdpa.c
98 msg.iotlb.iova = iova; in vhost_vdpa_dma_map()
99 msg.iotlb.size = size; in vhost_vdpa_dma_map()
102 msg.iotlb.type = VHOST_IOTLB_UPDATE; in vhost_vdpa_dma_map()
105 msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm, in vhost_vdpa_dma_map()
106 msg.iotlb.type); in vhost_vdpa_dma_map()
130 msg.iotlb.iova = iova; in vhost_vdpa_dma_unmap()
131 msg.iotlb.size = size; in vhost_vdpa_dma_unmap()
132 msg.iotlb.type = VHOST_IOTLB_INVALIDATE; in vhost_vdpa_dma_unmap()
135 msg.iotlb.size, msg.iotlb.type); in vhost_vdpa_dma_unmap()
187 msg.iotlb.type = VHOST_IOTLB_BATCH_END; in vhost_vdpa_listener_commit()
[all …]
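
vhost_vdpa_dma_map() and vhost_vdpa_dma_unmap() above fill a vhost_iotlb_msg and hand it to the kernel. Below is a hedged userspace sketch of the same update message written directly to a vhost-vdpa file descriptor; the VHOST_IOTLB_MSG_V2 framing and the VHOST_ACCESS_RW permission come from the uapi header, while opening the device and batching are left out.

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/vhost_types.h>

    /* Sketch: map [iova, iova + size) to the process address uaddr. */
    static int demo_iotlb_update(int vdpa_fd, uint64_t iova, uint64_t size,
                                 uint64_t uaddr)
    {
        struct vhost_msg_v2 msg;

        memset(&msg, 0, sizeof(msg));
        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb.iova = iova;
        msg.iotlb.size = size;
        msg.iotlb.uaddr = uaddr;
        msg.iotlb.perm = VHOST_ACCESS_RW;
        msg.iotlb.type = VHOST_IOTLB_UPDATE;

        /* The vhost-vdpa backend consumes IOTLB messages via write(2) */
        return write(vdpa_fd, &msg, sizeof(msg)) == (ssize_t)sizeof(msg) ? 0 : -1;
    }
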
vhost-backend.c
237 vhost_backend_handle_iotlb_msg(dev, &msg.iotlb); in vhost_kernel_iotlb_read()
252 vhost_backend_handle_iotlb_msg(dev, &msg.iotlb); in vhost_kernel_iotlb_read()
264 msg.iotlb = *imsg; in vhost_kernel_send_device_iotlb_msg()
274 msg.iotlb = *imsg; in vhost_kernel_send_device_iotlb_msg()
vhost.c
184 IOMMUTLBEntry iotlb; in vhost_sync_dirty_bitmap() local
196 if (!iotlb.target_as) { in vhost_sync_dirty_bitmap()
203 offset = used_phys & iotlb.addr_mask; in vhost_sync_dirty_bitmap()
204 phys = iotlb.translated_addr + offset; in vhost_sync_dirty_bitmap()
210 s = iotlb.addr_mask - offset; in vhost_sync_dirty_bitmap()
801 hwaddr iova = iotlb->iova + iommu->iommu_offset; in vhost_iommu_unmap_notify()
1137 IOMMUTLBEntry iotlb; in vhost_device_iotlb_miss() local
1148 if (iotlb.target_as != NULL) { in vhost_device_iotlb_miss()
1154 "%"PRIx64, iotlb.translated_addr); in vhost_device_iotlb_miss()
1158 len = MIN(iotlb.addr_mask + 1, len); in vhost_device_iotlb_miss()
[all …]
/openbmc/linux/drivers/vdpa/vdpa_user/
iova_domain.c
35 ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last, in vduse_iotlb_add_range()
55 vhost_iotlb_map_free(domain->iotlb, map); in vduse_iotlb_del_range()
60 struct vhost_iotlb *iotlb) in vduse_domain_set_map() argument
70 for (map = vhost_iotlb_itree_first(iotlb, start, last); map; in vduse_domain_set_map()
90 struct vhost_iotlb *iotlb) in vduse_domain_clear_map() argument
198 map = vhost_iotlb_itree_first(domain->iotlb, start, last); in vduse_domain_get_coherent_page()
478 vhost_iotlb_map_free(domain->iotlb, map); in vduse_domain_free_coherent()
533 vhost_iotlb_free(domain->iotlb); in vduse_domain_release()
568 domain->iotlb = vhost_iotlb_alloc(0, 0); in vduse_domain_create()
569 if (!domain->iotlb) in vduse_domain_create()
[all …]
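
vduse_iotlb_add_range() above uses the _ctx variant so that every mapping can carry a private pointer alongside the address and permission. The listing truncates the trailing arguments, so the parameter order, the VHOST_MAP_RW flag, and the map's opaque field in this sketch are assumptions based on how VDUSE appears to use the API.

    #include <linux/types.h>
    #include <linux/vhost_iotlb.h>

    struct demo_range_ctx {
        void *backing;  /* per-range private state, e.g. a page or file */
    };

    /* Sketch: install a mapping that carries a context pointer. */
    static int demo_add_with_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last,
                                 u64 addr, struct demo_range_ctx *ctx)
    {
        /* Later lookups through vhost_iotlb_itree_first() return a map whose
         * opaque pointer is ctx (field name assumed, not shown above). */
        return vhost_iotlb_add_range_ctx(iotlb, start, last, addr,
                                         VHOST_MAP_RW, ctx);
    }
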
iova_domain.h
34 struct vhost_iotlb *iotlb; member
42 struct vhost_iotlb *iotlb);
45 struct vhost_iotlb *iotlb);
/openbmc/qemu/hw/vfio/
common.c
268 ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only, in vfio_get_xlat_addr()
296 hwaddr iova = iotlb->iova + giommu->iommu_offset; in vfio_iommu_map_notify()
301 iova, iova + iotlb->addr_mask); in vfio_iommu_map_notify()
303 if (iotlb->target_as != &address_space_memory) { in vfio_iommu_map_notify()
305 iotlb->target_as->name ? iotlb->target_as->name : "none"); in vfio_iommu_map_notify()
312 if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { in vfio_iommu_map_notify()
326 iotlb->addr_mask + 1, vaddr, in vfio_iommu_map_notify()
335 ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb); in vfio_iommu_map_notify()
1205 hwaddr iova = iotlb->iova + giommu->iommu_offset; in vfio_iommu_map_dirty_notify()
1211 if (iotlb->target_as != &address_space_memory) { in vfio_iommu_map_dirty_notify()
[all …]
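
vfio_iommu_map_notify() above turns an IOMMUTLBEntry into either a DMA map or an unmap: any read/write permission means map, IOMMU_NONE means unmap, and the affected size is always addr_mask + 1. A hedged QEMU-context sketch of just that decision, with the VFIO calls themselves left out.

    #include "qemu/osdep.h"
    #include "exec/memory.h"

    /* Sketch: classify a notifier entry and derive the (iova, size) pair. */
    static bool demo_entry_is_map(const IOMMUTLBEntry *iotlb,
                                  hwaddr iommu_offset,
                                  hwaddr *iova, hwaddr *size)
    {
        *iova = iotlb->iova + iommu_offset;
        *size = iotlb->addr_mask + 1;   /* addr_mask covers size - 1 bytes */

        /* Any R/W permission is a map request; IOMMU_NONE is an unmap */
        return (iotlb->perm & IOMMU_RW) != IOMMU_NONE;
    }
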
container.c
65 IOMMUTLBEntry *iotlb) in vfio_dma_unmap_bitmap() argument
103 iotlb->translated_addr, vbmap.pages); in vfio_dma_unmap_bitmap()
119 ram_addr_t size, IOMMUTLBEntry *iotlb) in vfio_dma_unmap() argument
130 if (iotlb && vfio_devices_all_running_and_mig_active(container)) { in vfio_dma_unmap()
133 return vfio_dma_unmap_bitmap(container, iova, size, iotlb); in vfio_dma_unmap()
164 iotlb->translated_addr); in vfio_dma_unmap()
/openbmc/qemu/hw/arm/
smmu-common.c
85 entry = g_hash_table_lookup(bs->iotlb, &key); in smmu_iotlb_lookup()
113 if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) { in smmu_iotlb_insert()
121 g_hash_table_insert(bs->iotlb, key, new); in smmu_iotlb_insert()
127 g_hash_table_remove_all(s->iotlb); in smmu_iotlb_inv_all()
175 if (g_hash_table_remove(s->iotlb, &key)) { in smmu_iotlb_inv_iova()
189 g_hash_table_foreach_remove(s->iotlb, in smmu_iotlb_inv_iova()
197 g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); in smmu_iotlb_inv_asid()
203 g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); in smmu_iotlb_inv_vmid()
663 s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal, in smmu_base_realize()
681 g_hash_table_remove_all(s->iotlb); in smmu_base_reset_hold()
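
smmu_iotlb_lookup() and smmu_iotlb_insert() above implement the SMMU TLB as a GHashTable that is simply flushed once it reaches SMMU_IOTLB_MAX_SIZE. A hedged, standalone GLib sketch of that capped-cache pattern, using a plain 64-bit IOVA key instead of the SMMU's composite key; the names and cache size are illustrative.

    #include <glib.h>

    #define DEMO_IOTLB_MAX_SIZE 1024

    typedef struct {
        guint64 translated_addr;
    } DemoTlbEntry;

    static GHashTable *demo_iotlb_new(void)
    {
        /* Keys and values are heap-allocated and owned by the table */
        return g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                     g_free, g_free);
    }

    static void demo_iotlb_insert(GHashTable *tlb, guint64 iova,
                                  guint64 translated_addr)
    {
        guint64 *key = g_new(guint64, 1);
        DemoTlbEntry *entry = g_new(DemoTlbEntry, 1);

        /* Crude bound, as the SMMU code above does: flush everything when full */
        if (g_hash_table_size(tlb) >= DEMO_IOTLB_MAX_SIZE)
            g_hash_table_remove_all(tlb);

        *key = iova;
        entry->translated_addr = translated_addr;
        g_hash_table_insert(tlb, key, entry);
    }

    static DemoTlbEntry *demo_iotlb_lookup(GHashTable *tlb, guint64 iova)
    {
        return g_hash_table_lookup(tlb, &iova);
    }
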
/openbmc/qemu/include/standard-headers/linux/
vhost_types.h
99 struct vhost_iotlb_msg iotlb; member
108 struct vhost_iotlb_msg iotlb; member
/openbmc/linux/include/uapi/linux/
vhost_types.h
99 struct vhost_iotlb_msg iotlb; member
108 struct vhost_iotlb_msg iotlb; member
/openbmc/qemu/hw/i386/
trace-events
12 vtd_inv_desc_iotlb_global(void) "iotlb invalidate global"
13 vtd_inv_desc_iotlb_domain(uint16_t domain) "iotlb invalidate whole domain 0x%"PRIx16
14 vtd_inv_desc_iotlb_pages(uint16_t domain, uint64_t addr, uint8_t mask) "iotlb invalidate domain 0x%…
15 vtd_inv_desc_iotlb_pasid_pages(uint16_t domain, uint64_t addr, uint8_t mask, uint32_t pasid) "iotlb
16 vtd_inv_desc_iotlb_pasid(uint16_t domain, uint32_t pasid) "iotlb invalidate domain 0x%"PRIx16" pasi…
74 … bus, uint8_t slot, uint8_t func, uint64_t gpa, uint64_t txaddr) " update iotlb domid 0x%"PRIx16" …
98 amdvi_iotlb_hit(uint8_t bus, uint8_t slot, uint8_t func, uint64_t addr, uint64_t txaddr) "hit iotlb
amd_iommu.c
319 return g_hash_table_lookup(s->iotlb, &key); in amdvi_iotlb_lookup()
324 assert(s->iotlb); in amdvi_iotlb_reset()
326 g_hash_table_remove_all(s->iotlb); in amdvi_iotlb_reset()
342 g_hash_table_remove(s->iotlb, &key); in amdvi_iotlb_remove_page()
358 if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) { in amdvi_update_iotlb()
367 g_hash_table_replace(s->iotlb, key, entry); in amdvi_update_iotlb()
452 g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid, in amdvi_inval_pages()
495 g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid, in iommu_inval_iotlb()
1572 s->iotlb = g_hash_table_new_full(amdvi_uint64_hash, in amdvi_sysbus_realize()
intel_iommu.c
306 assert(s->iotlb); in vtd_reset_iotlb_locked()
307 g_hash_table_remove_all(s->iotlb); in vtd_reset_iotlb_locked()
343 entry = g_hash_table_lookup(s->iotlb, &key); in vtd_lookup_iotlb()
381 g_hash_table_replace(s->iotlb, key, entry); in vtd_update_iotlb()
3184 IOMMUTLBEntry iotlb = { in vtd_iommu_translate() local
3195 iotlb.iova = addr & VTD_PAGE_MASK_4K; in vtd_iommu_translate()
3197 iotlb.addr_mask = ~VTD_PAGE_MASK_4K; in vtd_iommu_translate()
3198 iotlb.perm = IOMMU_RW; in vtd_iommu_translate()
3206 iotlb.iova, iotlb.translated_addr, in vtd_iommu_translate()
3207 iotlb.addr_mask); in vtd_iommu_translate()
[all …]
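
vtd_iommu_translate() above builds a 4 KiB-granular IOMMUTLBEntry: the IOVA and translated address are page-aligned, addr_mask covers the low 12 bits, and perm reports the allowed access. A hedged QEMU-context sketch of filling such an entry; the explicit 4 KiB mask and the fixed IOMMU_RW permission are simplifications, not the VT-d logic itself.

    #include "qemu/osdep.h"
    #include "exec/memory.h"

    #define DEMO_PAGE_MASK_4K (~(hwaddr)0xfffULL)

    /* Sketch: produce a 4 KiB translation result for a translate callback. */
    static IOMMUTLBEntry demo_translate_4k(AddressSpace *as, hwaddr addr,
                                           hwaddr translated)
    {
        IOMMUTLBEntry iotlb = {
            .target_as = as,
            .iova = addr & DEMO_PAGE_MASK_4K,
            .translated_addr = translated & DEMO_PAGE_MASK_4K,
            .addr_mask = ~DEMO_PAGE_MASK_4K,  /* low 12 bits */
            .perm = IOMMU_RW,
        };

        return iotlb;
    }
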
/openbmc/qemu/system/
physmem.c
429 IOMMUTLBEntry iotlb; in address_space_translate_iommu() local
438 if (!(iotlb.perm & (1 << is_write))) { in address_space_translate_iommu()
442 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) in address_space_translate_iommu()
443 | (addr & iotlb.addr_mask)); in address_space_translate_iommu()
444 page_mask &= iotlb.addr_mask; in address_space_translate_iommu()
446 *target_as = iotlb.target_as; in address_space_translate_iommu()
680 IOMMUTLBEntry iotlb; in address_space_translate_for_iotlb() local
701 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) in address_space_translate_for_iotlb()
702 | (addr & iotlb.addr_mask)); in address_space_translate_for_iotlb()
707 if (!(iotlb.perm & IOMMU_RO)) { in address_space_translate_for_iotlb()
[all …]
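
address_space_translate_iommu() above composes the output address from the entry it gets back: the bits inside addr_mask come from the original address, the rest from translated_addr. That one expression is easy to misread, so here is the same composition as a hedged standalone helper.

    #include "qemu/osdep.h"
    #include "exec/memory.h"

    /* Sketch: apply an IOMMUTLBEntry to an input address. */
    static hwaddr demo_apply_xlat(const IOMMUTLBEntry *iotlb, hwaddr addr)
    {
        /* Offset within the translated region comes from addr itself */
        return (iotlb->translated_addr & ~iotlb->addr_mask)
               | (addr & iotlb->addr_mask);
    }
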
/openbmc/qemu/hw/ppc/
spapr_iommu.c
149 IOMMUTLBEntry iotlb; in spapr_tce_replay() local
159 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx); in spapr_tce_replay()
160 if (iotlb.perm != IOMMU_NONE) { in spapr_tce_replay()
161 n->notify(n, &iotlb); in spapr_tce_replay()
