/openbmc/linux/drivers/iommu/amd/

init.c:
    239  bool translation_pre_enabled(struct amd_iommu *iommu)
    241          return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
    244  static void clear_translation_pre_enabled(struct amd_iommu *iommu)
    246          iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
    249  static void init_translation_status(struct amd_iommu *iommu)
    253          ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
    255          iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
    277          struct amd_iommu *iommu;                     /* in get_global_efr() */
    279          for_each_iommu(iommu) {
    280                  u64 tmp = iommu->features;
    [all …]
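The match at line 253 reads the IOMMU control register and line 255 latches a software flag; the test between them is elided above. A minimal sketch of the whole helper, assuming the hardware-enable bit is tested via a CONTROL_IOMMU_EN bit position as in mainline (check the AMD IOMMU headers for the authoritative name):

    static void init_translation_status(struct amd_iommu *iommu)
    {
            u64 ctrl;

            /* Read back the hardware state left by firmware/bootloader. */
            ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);

            /* If translation was already enabled before the kernel took
             * over, remember that so tables are preserved, not clobbered. */
            if (ctrl & (1ULL << CONTROL_IOMMU_EN))
                    iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
    }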
iommu.c:
    122  struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
    125          struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
    152  void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
    154          struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
    156          pci_seg->rlookup_table[devid] = iommu;
    185  static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
    188          struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
    202  static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
    206          struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
    222          struct amd_iommu *iommu;                     /* in clone_alias() */
    [all …]
/openbmc/linux/drivers/iommu/

sun50i-iommu.c:
     99          struct iommu_device iommu;                   /* member */
    124          struct sun50i_iommu *iommu;                  /* member */
    137  static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
    139          return readl(iommu->base + offset);
    142  static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
    144          writel(value, iommu->base + offset);
    293          struct sun50i_iommu *iommu = sun50i_domain->iommu;   /* in sun50i_table_flush() */
    297          dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
    300  static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
    306          iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
    [all …]
rockchip-iommu.c:
    113          struct iommu_device iommu;                   /* member */
    121          struct rk_iommu *iommu;                      /* member */
    345  static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
    349          for (i = 0; i < iommu->num_mmu; i++)
    350                  writel(command, iommu->bases[i] + RK_MMU_COMMAND);
    357  static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
    366          for (i = 0; i < iommu->num_mmu; i++) {
    370                  rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
    374  static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
    379          for (i = 0; i < iommu->num_mmu; i++)
    [all …]
msm_iommu.c:
     54  static int __enable_clocks(struct msm_iommu_dev *iommu)
     58          ret = clk_enable(iommu->pclk);
     62          if (iommu->clk) {
     63                  ret = clk_enable(iommu->clk);
     65                          clk_disable(iommu->pclk);
     71  static void __disable_clocks(struct msm_iommu_dev *iommu)
     73          if (iommu->clk)
     74                  clk_disable(iommu->clk);
     75          clk_disable(iommu->pclk);
    120          struct msm_iommu_dev *iommu = NULL;          /* in __flush_iotlb() */
    [all …]
iommu-sysfs.c:
     54  int iommu_device_sysfs_add(struct iommu_device *iommu,
     62          iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
     63          if (!iommu->dev)
     66          device_initialize(iommu->dev);
     68          iommu->dev->class = &iommu_class;
     69          iommu->dev->parent = parent;
     70          iommu->dev->groups = groups;
     73          ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
     78          ret = device_add(iommu->dev);
     82          dev_set_drvdata(iommu->dev, iommu);
    [all …]
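For context, a sketch of how an IOMMU driver typically pairs this call with registration at probe time (modeled on the ARM SMMU driver; the "smmu.%pa" name format and the ioaddr variable are illustrative, and error handling is abbreviated):

    /* In the driver's probe path: publish the IOMMU in sysfs, then
     * register it with the core. Unwind sysfs on failure. */
    ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "smmu.%pa", &ioaddr);
    if (ret)
            return ret;

    ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
    if (ret) {
            iommu_device_sysfs_remove(&smmu->iommu);
            return ret;
    }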
Makefile:
      3  obj-$(CONFIG_IOMMU_API) += iommu.o
      4  obj-$(CONFIG_IOMMU_API) += iommu-traces.o
      5  obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
      6  obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
      7  obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
     19  obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
     20  obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
     21  obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
     22  obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
     25  obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
    [all …]
/openbmc/linux/arch/sparc/kernel/

iommu.c:
     52          struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);   /* in iommu_flushall() */
     53          if (iommu->iommu_flushinv) {
     54                  iommu_write(iommu->iommu_flushinv, ~(u64)0);
     59                  tag = iommu->iommu_tags;
     66          (void) iommu_read(iommu->write_complete_reg);
     80  #define IOPTE_IS_DUMMY(iommu, iopte) \
     81          ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
     83  static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
     88          val |= iommu->dummy_page_pa;
     93  int iommu_table_init(struct iommu *iommu, int tsbsize,
    [all …]
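Line 88 is the tail of iopte_make_dummy(); a plausible reconstruction of the full helper, inferred from the IOPTE_IS_DUMMY() macro above it (the mask-then-set of the page field is an assumption consistent with that macro):

    static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
    {
            unsigned long val = iopte_val(*iopte);

            /* Point the IOPTE at the per-IOMMU dummy page so stray DMA
             * hits a known, harmless physical address. */
            val &= ~IOPTE_PAGE;
            val |= iommu->dummy_page_pa;

            iopte_val(*iopte) = val;
    }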
iommu-common.c:
     19  static inline bool need_flush(struct iommu_map_table *iommu)
     21          return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
     24  static inline void set_flush(struct iommu_map_table *iommu)
     26          iommu->flags |= IOMMU_NEED_FLUSH;
     29  static inline void clear_flush(struct iommu_map_table *iommu)
     31          iommu->flags &= ~IOMMU_NEED_FLUSH;
     52  void iommu_tbl_pool_init(struct iommu_map_table *iommu,
     60          struct iommu_pool *p = &(iommu->large_pool);
     64                  iommu->nr_pools = IOMMU_NR_POOLS;
     66                  iommu->nr_pools = npools;
    [all …]
sbus.c:
     63          struct iommu *iommu = dev->archdata.iommu;           /* in sbus_set_sbus64() */
     78          cfg_reg = iommu->write_complete_reg;
    213          struct iommu *iommu = op->dev.archdata.iommu;        /* in sbus_build_irq() */
    214          unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
    275          struct iommu *iommu = op->dev.archdata.iommu;        /* in sysio_ue_handler() */
    276          unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
    349          struct iommu *iommu = op->dev.archdata.iommu;        /* in sysio_ce_handler() */
    350          unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
    428          struct iommu *iommu = op->dev.archdata.iommu;        /* in sysio_sbus_error_handler() */
    433          reg_base = iommu->write_complete_reg - 0x2000UL;
    [all …]
/openbmc/linux/drivers/iommu/intel/

irq_remapping.c:
     33          struct intel_iommu *iommu;                   /* member */
     40          struct intel_iommu *iommu;                   /* member */
     47          struct intel_iommu *iommu;                   /* member */
     83  static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
     87  static bool ir_pre_enabled(struct intel_iommu *iommu)
     89          return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
     92  static void clear_ir_pre_enabled(struct intel_iommu *iommu)
     94          iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
     97  static void init_ir_status(struct intel_iommu *iommu)
    101          gsts = readl(iommu->reg + DMAR_GSTS_REG);
    [all …]
dmar.c:
     67  static void free_iommu(struct intel_iommu *iommu);
    461          if (dmaru->iommu)                            /* in dmar_free_drhd() */
    462                  free_iommu(dmaru->iommu);
    501          drhd->iommu->node = node;                    /* in dmar_parse_one_rhsa() */
    939          x86_init.iommu.iommu_init = intel_iommu_init;   /* in detect_intel_iommu() */
    952  static void unmap_iommu(struct intel_iommu *iommu)
    954          iounmap(iommu->reg);
    955          release_mem_region(iommu->reg_phys, iommu->reg_size);
    966  static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
    971          iommu->reg_phys = phys_addr;
    [all …]
svm.c:
     64  int intel_svm_enable_prq(struct intel_iommu *iommu)
     70          pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
     73                          iommu->name);
     76          iommu->prq = page_address(pages);
     78          irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
     81                          iommu->name);
     85          iommu->pr_irq = irq;
     87          snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
     88                   "dmar%d-iopfq", iommu->seq_id);
     89          iopfq = iopf_queue_alloc(iommu->iopfq_name);
    [all …]
iommu.c:
    223  static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
    225          if (!iommu->copied_tables)
    228          return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
    232  set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
    234          set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
    238  clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
    240          clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
    273          struct intel_iommu *iommu; /* the corresponding iommu */
    304  static bool translation_pre_enabled(struct intel_iommu *iommu)
    306          return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
    [all …]
cap_audit.c:
     74  static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type)
     81          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pi_support, CAP_PI_MASK);
     82          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eim_support, ECAP_EIM_MASK);
     86          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK);
     87          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
     88          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
     89          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
     90          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pgsel_inv, CAP_PSI_MASK);
     91          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, zlr, CAP_ZLR_MASK);
     92          CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, caching_mode, CAP_CM_MASK);
    [all …]
pasid.c:
     29  int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
     36          raw_spin_lock_irqsave(&iommu->register_lock, flags);
     37          dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
     38          IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
     40          raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
     48                  pr_info("IOMMU: %s: No PASID available\n", iommu->name);
     54                          iommu->name, status_code);
     60  void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
     66          raw_spin_lock_irqsave(&iommu->register_lock, flags);
     67          dmar_writeq(iommu->reg + DMAR_VCMD_REG,
    [all …]
debugfs.c:
    117          struct intel_iommu *iommu;                   /* in iommu_regset_show() */
    123          for_each_active_iommu(iommu, drhd) {
    131                             iommu->name, drhd->reg_base_addr);
    137                  raw_spin_lock_irqsave(&iommu->register_lock, flag);
    139                          value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
    145                          value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
    150                  raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
    218  static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
    240                  context = iommu_context_addr(iommu, bus, devfn, 0);
    249          tbl_wlk.rt_entry = &iommu->root_entry[bus];
    [all …]
perfmon.c:
    346          struct intel_iommu *iommu = iommu_pmu->iommu;   /* in iommu_pmu_start() */
    375          ecmd_submit_sync(iommu, DMA_ECMD_ENABLE, hwc->idx, 0);
    383          struct intel_iommu *iommu = iommu_pmu->iommu;   /* in iommu_pmu_stop() */
    387          ecmd_submit_sync(iommu, DMA_ECMD_DISABLE, hwc->idx, 0);
    495          struct intel_iommu *iommu = iommu_pmu->iommu;   /* in iommu_pmu_enable() */
    497          ecmd_submit_sync(iommu, DMA_ECMD_UNFREEZE, 0, 0);
    503          struct intel_iommu *iommu = iommu_pmu->iommu;   /* in iommu_pmu_disable() */
    505          ecmd_submit_sync(iommu, DMA_ECMD_FREEZE, 0, 0);
    538          struct intel_iommu *iommu = dev_id;             /* in iommu_pmu_irq_handler() */
    540          if (!dmar_readl(iommu->reg + DMAR_PERFINTRSTS_REG))
    [all …]
/openbmc/linux/drivers/vfio/

vfio_iommu_type1.c:
    157  vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
    165  static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
    168          struct rb_node *node = iommu->dma_list.rb_node;
    184  static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
    188          struct rb_node *node = iommu->dma_list.rb_node;
    209  static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
    211          struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
    225          rb_insert_color(&new->node, &iommu->dma_list);
    228  static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
    230          rb_erase(&old->node, &iommu->dma_list);
    [all …]
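The fragments of vfio_find_dma() above imply an interval search over the iova-sorted rb-tree (see <linux/rbtree.h>); a sketch consistent with them, assuming struct vfio_dma carries iova and size fields describing each mapping:

    static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
                                          dma_addr_t start, size_t size)
    {
            struct rb_node *node = iommu->dma_list.rb_node;

            while (node) {
                    struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

                    if (start + size <= dma->iova)
                            node = node->rb_left;    /* range ends before this mapping */
                    else if (start >= dma->iova + dma->size)
                            node = node->rb_right;   /* range begins after this mapping */
                    else
                            return dma;              /* ranges overlap: found it */
            }

            return NULL;
    }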
/openbmc/linux/arch/powerpc/platforms/cell/

iommu.c:
    104          struct cbe_iommu *iommu;                     /* member */
    131  static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
    138          reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
    195          invalidate_tce_cache(window->iommu, io_pte, npages);   /* in tce_build_cell() */
    218                  __pa(window->iommu->pad_page) |              /* in tce_free_cell() */
    229          invalidate_tce_cache(window->iommu, io_pte, npages);
    235          struct cbe_iommu *iommu = data;              /* in ioc_interrupt() */
    237          stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
    253          out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
    298  static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu,
    [all …]
/openbmc/linux/Documentation/devicetree/bindings/pci/

pci-iommu.txt:
     26  Documentation/devicetree/bindings/iommu/iommu.txt.
     35  - iommu-map: Maps a Requester ID to an IOMMU and associated IOMMU specifier
     39    (rid-base,iommu,iommu-base,length).
     42    the listed IOMMU, with the IOMMU specifier (r - rid-base + iommu-base).
     44  - iommu-map-mask: A mask to be applied to each Requester ID prior to being
     45    mapped to an IOMMU specifier per the iommu-map property.
     55  iommu: iommu@a {
     57          compatible = "vendor,some-iommu";
     58          #iommu-cells = <1>;
     70          iommu-map = <0x0 &iommu 0x0 0x10000>;
    [all …]
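To make the mapping rule concrete, a small illustrative helper (names hypothetical, not from the binding) that applies iommu-map-mask and then one (rid-base, iommu, iommu-base, length) entry exactly as described at lines 42-45:

    #include <stdint.h>

    struct iommu_map_entry {
            uint32_t rid_base;      /* first Requester ID covered */
            uint32_t iommu_base;    /* first IOMMU specifier produced */
            uint32_t length;        /* number of RIDs covered */
    };

    /* Returns 0 and writes the IOMMU specifier if the (masked) RID falls
     * inside this entry; returns -1 if another entry must be consulted. */
    static int map_rid(const struct iommu_map_entry *e, uint32_t map_mask,
                       uint32_t rid, uint32_t *specifier)
    {
            uint32_t r = rid & map_mask;    /* iommu-map-mask is applied first */

            if (r < e->rid_base || r - e->rid_base >= e->length)
                    return -1;

            *specifier = r - e->rid_base + e->iommu_base;   /* (r - rid-base + iommu-base) */
            return 0;
    }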
/openbmc/linux/arch/sparc/mm/

iommu.c:
     60          struct iommu_struct *iommu;                  /* in sbus_iommu_init() */
     67          iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
     68          if (!iommu) {
     73          iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
     75          if (!iommu->regs) {
     80          control = sbus_readl(&iommu->regs->control);
     85          sbus_writel(control, &iommu->regs->control);
     87          iommu_invalidate(iommu->regs);
     88          iommu->start = IOMMU_START;
     89          iommu->end = 0xffffffff;
    [all …]
/openbmc/qemu/hw/remote/

iommu.c:
     42          RemoteIommu *iommu = opaque;                 /* in remote_iommu_find_add_as() */
     45          qemu_mutex_lock(&iommu->lock);
     47          elem = g_hash_table_lookup(iommu->elem_by_devfn, INT2VOIDP(devfn));
     51                  g_hash_table_insert(iommu->elem_by_devfn, INT2VOIDP(devfn), elem);
     60          qemu_mutex_unlock(&iommu->lock);
     85          RemoteIommu *iommu = REMOTE_IOMMU(obj);      /* in remote_iommu_init() */
     87          iommu->elem_by_devfn = g_hash_table_new_full(NULL, NULL, NULL, g_free);
     89          qemu_mutex_init(&iommu->lock);
     94          RemoteIommu *iommu = REMOTE_IOMMU(obj);      /* in remote_iommu_finalize() */
     96          qemu_mutex_destroy(&iommu->lock);
    [all …]
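The excerpt shows a lazily populated per-devfn table guarded by a mutex. A self-contained sketch of the same lookup-or-insert pattern in plain GLib (DemoIommu/DemoElem are stand-ins for RemoteIommu/RemoteIommuElem; QEMU's own code uses qemu_mutex_* and INT2VOIDP instead):

    #include <glib.h>

    typedef struct {
            GHashTable *elem_by_devfn;
            GMutex      lock;
    } DemoIommu;

    typedef struct {
            int devfn;      /* stand-in for RemoteIommuElem's payload */
    } DemoElem;

    static DemoElem *demo_find_add(DemoIommu *iommu, int devfn)
    {
            DemoElem *elem;

            g_mutex_lock(&iommu->lock);
            /* Create the entry on first use; later calls return the same one. */
            elem = g_hash_table_lookup(iommu->elem_by_devfn, GINT_TO_POINTER(devfn));
            if (!elem) {
                    elem = g_new0(DemoElem, 1);
                    elem->devfn = devfn;
                    g_hash_table_insert(iommu->elem_by_devfn,
                                        GINT_TO_POINTER(devfn), elem);
            }
            g_mutex_unlock(&iommu->lock);
            return elem;
    }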
/openbmc/qemu/docs/

bypass-iommu.txt:
      8  is not flexible. We introduce the bypass_iommu property to support
     10  passthrough devices in no-iommu mode alongside devices that go through the vIOMMU in
     15  virtual iommu. The bypass_iommu property is valid only when there is a
     16  virtual iommu in the system; it is implemented to allow some devices to
     22  The bypass iommu feature supports the PXB host bridge and the default main host
     26  on AArch64. Other machine types do not support bypass iommu for the default
     29  1. The following are the bypass iommu options:
     33     qemu -machine virt,iommu=smmuv3,default_bus_bypass_iommu=true
     34     (3) X86 default root bus bypass iommu:
     41     -machine virt,kernel_irqchip=on,iommu=smmuv3,default_bus_bypass_iommu=true \
    [all …]
/openbmc/linux/drivers/gpu/drm/msm/

msm_iommu.c:
    157          struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);   /* in msm_iommu_pagetable_destroy() */
    165          if (atomic_dec_return(&iommu->pagetables) == 0)
    193          struct msm_iommu *iommu = to_msm_iommu(mmu);                 /* in msm_iommu_get_geometry() */
    195          return &iommu->domain->geometry;
    252          struct msm_iommu *iommu = to_msm_iommu(parent);              /* in msm_iommu_pagetable_create() */
    295          if (atomic_inc_return(&iommu->pagetables) == 1) {
    325          struct msm_iommu *iommu = arg;                               /* in msm_fault_handler() */
    326          struct msm_mmu *mmu = &iommu->base;
    327          struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
    335          if (iommu->base.handler)
    [all …]