Lines Matching full:iommu
27 #include "iommu.h"
28 #include "../dma-iommu.h"
30 #include "../iommu-sva.h"
223 static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in context_copied() argument
225 if (!iommu->copied_tables) in context_copied()
228 return test_bit(((long)bus << 8) | devfn, iommu->copied_tables); in context_copied()
232 set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in set_context_copied() argument
234 set_bit(((long)bus << 8) | devfn, iommu->copied_tables); in set_context_copied()
238 clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in clear_context_copied() argument
240 clear_bit(((long)bus << 8) | devfn, iommu->copied_tables); in clear_context_copied()
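These three helpers index a per-IOMMU bitmap by the 16-bit PCI source-id. A minimal sketch of the arithmetic (the BIT_ULL(16) sizing that copy_translation_tables() uses for iommu->copied_tables, further down, matches this index space):

	/* The source-id puts the bus number in the high byte and the
	 * device/function (devfn) in the low byte, so the index spans
	 * [0, 65535] -- one bit per possible PCI function. */
	u8 bus = 0x3a, devfn = 0x10;			/* example values */
	unsigned long idx = ((long)bus << 8) | devfn;	/* == 0x3a10 */
	/* test_bit()/set_bit()/clear_bit() above all use this index. */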
246 * 2. It maps to each iommu if successful.
247 * 3. Each iommu maps to this domain if successful.
273 struct intel_iommu *iommu; /* the corresponding iommu */ member
304 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
306 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
309 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
311 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
314 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
318 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
320 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
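Only lines containing "iommu" survive this listing, so the test between the readl() and the flag update is hidden. The presumed full body, assuming the DMA_GSTS_TES ("translation enable status") bit name from the VT-d spec:

	static void init_translation_status(struct intel_iommu *iommu)
	{
		u32 gsts;

		gsts = readl(iommu->reg + DMAR_GSTS_REG);
		if (gsts & DMA_GSTS_TES)	/* firmware/kexec left DMA remapping on */
			iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
	}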
331 pr_info("IOMMU enabled\n"); in intel_iommu_setup()
335 pr_info("IOMMU disabled\n"); in intel_iommu_setup()
340 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n"); in intel_iommu_setup()
343 pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n"); in intel_iommu_setup()
355 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
400 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
404 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) in __iommu_calculate_sagaw() argument
408 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
409 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
412 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
416 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
422 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
427 sagaw = __iommu_calculate_sagaw(iommu); in __iommu_calculate_agaw()
437 * Calculate max SAGAW for each iommu.
439 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
441 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
445 * Calculate agaw for each iommu.
449 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
451 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
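The selection loop of __iommu_calculate_agaw() is elided by the keyword filter. A sketch of the presumed logic, assuming the width_to_agaw() helper used elsewhere in the driver:

	static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
	{
		unsigned long sagaw;
		int agaw;

		sagaw = __iommu_calculate_sagaw(iommu);
		/* Walk down from the AGAW that covers max_gaw until we hit
		 * one the hardware actually reports as supported. */
		for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
			if (test_bit(agaw, &sagaw))
				break;
		}
		return agaw;
	}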
454 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
456 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
457 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
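This predicate decides whether page-table and context-table updates are visible to the hardware without explicit cache maintenance. A related helper used throughout this file, __iommu_flush_cache(), applies the same idea for the legacy coherency bit; a sketch (it lives in the driver's local header, assuming clflush_cache_range() as the write-back primitive):

	static inline void __iommu_flush_cache(struct intel_iommu *iommu,
					       void *addr, int size)
	{
		/* Non-coherent hardware walks structures straight from
		 * memory, so flush our updates out of the CPU caches. */
		if (!ecap_coherent(iommu->ecap))
			clflush_cache_range(addr, size);
	}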
464 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
471 if (!iommu_paging_structure_coherency(info->iommu)) { in domain_update_iommu_coherency()
481 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
482 if (!iommu_paging_structure_coherency(iommu)) { in domain_update_iommu_coherency()
494 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
502 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
503 if (iommu != skip) { in domain_update_iommu_superpage()
505 if (!cap_fl1gp_support(iommu->cap)) in domain_update_iommu_superpage()
508 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
591 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
594 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
602 if (!alloc && context_copied(iommu, bus, devfn)) in iommu_context_addr()
606 if (sm_supported(iommu)) { in iommu_context_addr()
620 context = alloc_pgtable_page(iommu->node, GFP_ATOMIC); in iommu_context_addr()
624 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
627 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
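A scalable-mode context entry is twice the legacy size, so one 4KiB page covers only devfn 0x00-0x7f and the root entry's upper half addresses the rest. The devfn adjustment hidden by the filter is presumably:

	if (sm_supported(iommu)) {
		if (devfn >= 0x80) {
			devfn -= 0x80;
			entry = &root->hi;	/* second half of the bus */
		}
		devfn *= 2;			/* 32-byte entries, 128 per page */
	}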
665 /* We know that this device on this chipset has its own IOMMU. in quirk_ioat_snb_local_iommu()
666 * If we find it under a different IOMMU, then the BIOS is lying in quirk_ioat_snb_local_iommu()
667 * to us. Hope that the IOMMU for this device is actually in quirk_ioat_snb_local_iommu()
678 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
689 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
691 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
710 struct intel_iommu *iommu; in device_to_iommu() local
724 * the PF instead to find the IOMMU. */ in device_to_iommu()
732 for_each_iommu(iommu, drhd) { in device_to_iommu()
740 * which we used for the IOMMU lookup. Strictly speaking in device_to_iommu()
766 iommu = NULL; in device_to_iommu()
768 if (iommu_is_dummy(iommu, dev)) in device_to_iommu()
769 iommu = NULL; in device_to_iommu()
773 return iommu; in device_to_iommu()
783 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
788 if (!iommu->root_entry) in free_context_table()
792 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
796 if (!sm_supported(iommu)) in free_context_table()
799 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
804 free_pgtable_page(iommu->root_entry); in free_context_table()
805 iommu->root_entry = NULL; in free_context_table()
809 static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, in pgtable_walk() argument
834 void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, in dmar_fault_dump_ptes() argument
846 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
849 if (!iommu->root_entry) { in dmar_fault_dump_ptes()
853 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
855 if (sm_supported(iommu)) in dmar_fault_dump_ptes()
862 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); in dmar_fault_dump_ptes()
872 if (!sm_supported(iommu)) { in dmar_fault_dump_ptes()
923 pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level); in dmar_fault_dump_ptes()
936 /* Address beyond IOMMU's addressing capabilities. */ in pfn_to_dma_pte()
1178 /* We can't just free the pages because the IOMMU may still be walking
1200 /* iommu handling */
1201 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1205 root = alloc_pgtable_page(iommu->node, GFP_ATOMIC); in iommu_alloc_root_entry()
1208 iommu->name); in iommu_alloc_root_entry()
1212 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1213 iommu->root_entry = root; in iommu_alloc_root_entry()
1218 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1224 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1225 if (sm_supported(iommu)) in iommu_set_root_entry()
1228 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1229 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1231 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1234 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1237 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1243 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1246 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1247 if (sm_supported(iommu)) in iommu_set_root_entry()
1248 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1249 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
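iommu_set_root_entry() performs the VT-d set-root-table-pointer handshake. With the filtered lines restored, the core sequence is roughly (a sketch, assuming the DMA_RTADDR_SMT and DMA_GSTS_RTPS bit names from the spec):

	addr = virt_to_phys(iommu->root_entry);
	if (sm_supported(iommu))
		addr |= DMA_RTADDR_SMT;		/* scalable-mode table format */

	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
	/* Hardware latches the pointer and acks via the RTPS status bit. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_RTPS), sts);
	/* Afterwards, flush context/PASID/IOTLB caches that may hold stale
	 * entries -- skipped when cap_esrtps() says the hardware already
	 * invalidates them on a root-table pointer switch. */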
1252 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1257 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1260 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1261 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1264 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1267 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1271 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1291 iommu->name, type); in __iommu_flush_context()
1296 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1297 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1300 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1303 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1307 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1310 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1329 iommu->name, type); in __iommu_flush_iotlb()
1333 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1336 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1339 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1340 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1343 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1346 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1359 struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_lookup_dev_info() argument
1366 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1476 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in __iommu_flush_dev_iotlb()
1501 qi_flush_dev_iotlb_pasid(info->iommu, in iommu_flush_dev_iotlb()
1510 static void domain_flush_pasid_iotlb(struct intel_iommu *iommu, in domain_flush_pasid_iotlb() argument
1514 u16 did = domain_id_iommu(domain, iommu); in domain_flush_pasid_iotlb()
1520 qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih); in domain_flush_pasid_iotlb()
1523 qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih); in domain_flush_pasid_iotlb()
1527 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1535 u16 did = domain_id_iommu(domain, iommu); in iommu_flush_iotlb_psi()
1544 domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1571 if (!cap_pgsel_inv(iommu->cap) || in iommu_flush_iotlb_psi()
1572 mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1573 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1576 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1584 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1589 static inline void __mapping_notify_one(struct intel_iommu *iommu, in __mapping_notify_one() argument
1597 if (cap_caching_mode(iommu->cap) && !domain->use_first_level) in __mapping_notify_one()
1598 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1600 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
1610 struct intel_iommu *iommu = info->iommu; in intel_flush_iotlb_all() local
1611 u16 did = domain_id_iommu(dmar_domain, iommu); in intel_flush_iotlb_all()
1614 domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0); in intel_flush_iotlb_all()
1616 iommu->flush.flush_iotlb(iommu, did, 0, 0, in intel_flush_iotlb_all()
1619 if (!cap_caching_mode(iommu->cap)) in intel_flush_iotlb_all()
1624 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1629 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1632 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1633 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1635 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1638 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1641 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1644 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1649 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1650 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1651 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1654 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1657 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1660 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1665 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1666 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1669 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1670 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1671 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1674 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1677 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1680 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1684 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1686 iommu->name, ndomains); in iommu_init_domains()
1688 spin_lock_init(&iommu->lock); in iommu_init_domains()
1690 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); in iommu_init_domains()
1691 if (!iommu->domain_ids) in iommu_init_domains()
1700 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1710 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1715 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1717 if (!iommu->domain_ids) in disable_dmar_iommu()
1721 * All iommu domains must have been detached from the devices, in disable_dmar_iommu()
1724 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) in disable_dmar_iommu()
1728 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1729 iommu_disable_translation(iommu); in disable_dmar_iommu()
1732 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1734 if (iommu->domain_ids) { in free_dmar_iommu()
1735 bitmap_free(iommu->domain_ids); in free_dmar_iommu()
1736 iommu->domain_ids = NULL; in free_dmar_iommu()
1739 if (iommu->copied_tables) { in free_dmar_iommu()
1740 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1741 iommu->copied_tables = NULL; in free_dmar_iommu()
1745 free_context_table(iommu); in free_dmar_iommu()
1748 if (pasid_supported(iommu)) { in free_dmar_iommu()
1749 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1750 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1794 struct intel_iommu *iommu) in domain_attach_iommu() argument
1804 spin_lock(&iommu->lock); in domain_attach_iommu()
1805 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1808 spin_unlock(&iommu->lock); in domain_attach_iommu()
1813 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1814 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1816 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1820 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1823 info->iommu = iommu; in domain_attach_iommu()
1824 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1832 spin_unlock(&iommu->lock); in domain_attach_iommu()
1836 clear_bit(info->did, iommu->domain_ids); in domain_attach_iommu()
1838 spin_unlock(&iommu->lock); in domain_attach_iommu()
1844 struct intel_iommu *iommu) in domain_detach_iommu() argument
1848 spin_lock(&iommu->lock); in domain_detach_iommu()
1849 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1851 clear_bit(info->did, iommu->domain_ids); in domain_detach_iommu()
1852 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1857 spin_unlock(&iommu->lock); in domain_detach_iommu()
1908 * IOMMU hardware will use the PASID value set in this field for
1939 struct intel_iommu *iommu, in domain_context_mapping_one() argument
1944 domain_lookup_dev_info(domain, iommu, bus, devfn); in domain_context_mapping_one()
1945 u16 did = domain_id_iommu(domain, iommu); in domain_context_mapping_one()
1956 spin_lock(&iommu->lock); in domain_context_mapping_one()
1958 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
1963 if (context_present(context) && !context_copied(iommu, bus, devfn)) in domain_context_mapping_one()
1975 if (context_copied(iommu, bus, devfn)) { in domain_context_mapping_one()
1978 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
1979 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
1983 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
1987 clear_context_copied(iommu, bus, devfn); in domain_context_mapping_one()
1992 if (sm_supported(iommu)) { in domain_context_mapping_one()
2021 * Skip top levels of page tables for iommu which has in domain_context_mapping_one()
2024 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2044 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2052 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
2061 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2062 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2066 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2068 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2074 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2081 struct intel_iommu *iommu; member
2090 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2100 struct intel_iommu *iommu; in domain_context_mapping() local
2103 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2104 if (!iommu) in domain_context_mapping()
2110 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2114 data.iommu = iommu; in domain_context_mapping()
2182 iommu_flush_iotlb_psi(info->iommu, domain, in switch_to_super_page()
2299 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one() local
2303 if (!iommu) in domain_context_clear_one()
2306 spin_lock(&iommu->lock); in domain_context_clear_one()
2307 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2309 spin_unlock(&iommu->lock); in domain_context_clear_one()
2313 if (sm_supported(iommu)) { in domain_context_clear_one()
2317 did_old = domain_id_iommu(info->domain, iommu); in domain_context_clear_one()
2323 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2324 spin_unlock(&iommu->lock); in domain_context_clear_one()
2325 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2331 if (sm_supported(iommu)) in domain_context_clear_one()
2332 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); in domain_context_clear_one()
2334 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2343 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
2353 * Skip top levels of page tables for iommu which has in domain_setup_first_level()
2356 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2372 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, in domain_setup_first_level()
2373 domain_id_iommu(domain, iommu), in domain_setup_first_level()
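domain_context_mapping_one() and domain_setup_first_level() share the same truncated "Skip top levels of page tables" walk. Under the usual reading it descends one level per AGAW step until the table depth matches what this IOMMU supports:

	pgd = domain->pgd;
	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		pgd = phys_to_virt(dma_pte_addr(pgd));
		if (!dma_pte_present(pgd))
			return -ENOMEM;		/* level not populated yet */
	}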
2461 struct intel_iommu *iommu; in dmar_domain_attach_device() local
2466 iommu = device_to_iommu(dev, &bus, &devfn); in dmar_domain_attach_device()
2467 if (!iommu) in dmar_domain_attach_device()
2470 ret = domain_attach_iommu(domain, iommu); in dmar_domain_attach_device()
2479 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { in dmar_domain_attach_device()
2482 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_domain_attach_device()
2485 ret = domain_setup_first_level(iommu, domain, dev, in dmar_domain_attach_device()
2488 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_domain_attach_device()
2504 if (sm_supported(info->iommu) || !domain_type_is_si(info->domain)) in dmar_domain_attach_device()
2565 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
2568 * Start from a sane iommu hardware state. in intel_iommu_init_qi()
2573 if (!iommu->qi) { in intel_iommu_init_qi()
2577 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2582 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2585 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2589 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2590 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2592 iommu->name); in intel_iommu_init_qi()
2594 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2595 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2596 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
2600 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
2622 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
2652 new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL); in copy_context_table()
2666 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2667 set_bit(did, iommu->domain_ids); in copy_context_table()
2669 set_context_copied(iommu, bus, devfn); in copy_context_table()
2675 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
2684 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
2694 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2696 new_ext = !!sm_supported(iommu); in copy_translation_tables()
2707 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2708 if (!iommu->copied_tables) in copy_translation_tables()
2727 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
2731 iommu->name, bus); in copy_translation_tables()
2736 spin_lock(&iommu->lock); in copy_translation_tables()
2745 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2752 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2755 spin_unlock(&iommu->lock); in copy_translation_tables()
2759 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
2772 struct intel_iommu *iommu; in init_dmars() local
2779 for_each_iommu(iommu, drhd) { in init_dmars()
2781 iommu_disable_translation(iommu); in init_dmars()
2786 * Find the max pasid size of all IOMMUs in the system. in init_dmars()
2790 if (pasid_supported(iommu)) { in init_dmars()
2791 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2797 intel_iommu_init_qi(iommu); in init_dmars()
2799 ret = iommu_init_domains(iommu); in init_dmars()
2803 init_translation_status(iommu); in init_dmars()
2805 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
2806 iommu_disable_translation(iommu); in init_dmars()
2807 clear_translation_pre_enabled(iommu); in init_dmars()
2809 iommu->name); in init_dmars()
2815 * among all IOMMUs. Need to split it later. in init_dmars()
2817 ret = iommu_alloc_root_entry(iommu); in init_dmars()
2821 if (translation_pre_enabled(iommu)) { in init_dmars()
2824 ret = copy_translation_tables(iommu); in init_dmars()
2827 * We found the IOMMU with translation in init_dmars()
2836 iommu->name); in init_dmars()
2837 iommu_disable_translation(iommu); in init_dmars()
2838 clear_translation_pre_enabled(iommu); in init_dmars()
2841 iommu->name); in init_dmars()
2845 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
2847 intel_svm_check(iommu); in init_dmars()
2855 for_each_active_iommu(iommu, drhd) { in init_dmars()
2856 iommu_flush_write_buffer(iommu); in init_dmars()
2857 iommu_set_root_entry(iommu); in init_dmars()
2880 for_each_iommu(iommu, drhd) { in init_dmars()
2887 iommu_disable_protect_mem_regions(iommu); in init_dmars()
2891 iommu_flush_write_buffer(iommu); in init_dmars()
2894 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
2900 ret = intel_svm_enable_prq(iommu); in init_dmars()
2906 ret = dmar_set_interrupt(iommu); in init_dmars()
2914 for_each_active_iommu(iommu, drhd) { in init_dmars()
2915 disable_dmar_iommu(iommu); in init_dmars()
2916 free_dmar_iommu(iommu); in init_dmars()
2954 /* This IOMMU has *only* gfx devices. Either bypass it or in init_no_remapping_devices()
2966 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
2969 for_each_active_iommu(iommu, drhd) { in init_iommu_hw()
2970 if (iommu->qi) { in init_iommu_hw()
2971 ret = dmar_reenable_qi(iommu); in init_iommu_hw()
2977 for_each_iommu(iommu, drhd) { in init_iommu_hw()
2984 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
2988 iommu_flush_write_buffer(iommu); in init_iommu_hw()
2989 iommu_set_root_entry(iommu); in init_iommu_hw()
2990 iommu_enable_translation(iommu); in init_iommu_hw()
2991 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3000 struct intel_iommu *iommu; in iommu_flush_all() local
3002 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
3003 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
3005 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
3013 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3018 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3019 iommu_disable_translation(iommu); in iommu_suspend()
3021 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3023 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3024 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3025 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3026 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3027 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3028 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3029 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3030 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
3032 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
3040 struct intel_iommu *iommu = NULL; in iommu_resume() local
3045 panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
3047 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
3051 for_each_active_iommu(iommu, drhd) { in iommu_resume()
3053 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
3055 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
3056 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
3057 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
3058 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
3059 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
3060 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
3061 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
3062 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
3064 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3290 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
3292 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu); in intel_iommu_add()
3296 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
3298 iommu->name); in intel_iommu_add()
3302 sp = domain_update_iommu_superpage(NULL, iommu) - 1; in intel_iommu_add()
3303 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
3305 iommu->name); in intel_iommu_add()
3312 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
3313 iommu_disable_translation(iommu); in intel_iommu_add()
3315 ret = iommu_init_domains(iommu); in intel_iommu_add()
3317 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
3321 intel_svm_check(iommu); in intel_iommu_add()
3328 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3332 intel_iommu_init_qi(iommu); in intel_iommu_add()
3333 iommu_flush_write_buffer(iommu); in intel_iommu_add()
3336 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
3337 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
3342 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
3346 iommu_set_root_entry(iommu); in intel_iommu_add()
3347 iommu_enable_translation(iommu); in intel_iommu_add()
3349 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3353 disable_dmar_iommu(iommu); in intel_iommu_add()
3355 free_dmar_iommu(iommu); in intel_iommu_add()
3362 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
3366 if (iommu == NULL) in dmar_iommu_hotplug()
3372 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
3373 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
3426 static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) in dmar_ats_supported() argument
3441 * When IOMMU is in legacy mode, enabling ATS is done in dmar_ats_supported()
3446 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
3574 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
3580 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
3581 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
3600 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
3603 for_each_iommu(iommu, drhd) in intel_disable_iommus()
3604 iommu_disable_translation(iommu); in intel_disable_iommus()
3610 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
3618 for_each_iommu(iommu, drhd) in intel_iommu_shutdown()
3619 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
3631 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
3637 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in version_show() local
3638 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
3647 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in address_show() local
3648 return sysfs_emit(buf, "%llx\n", iommu->reg_phys); in address_show()
3655 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in cap_show() local
3656 return sysfs_emit(buf, "%llx\n", iommu->cap); in cap_show()
3663 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in ecap_show() local
3664 return sysfs_emit(buf, "%llx\n", iommu->ecap); in ecap_show()
3671 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_supported_show() local
3672 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
3679 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_used_show() local
3681 bitmap_weight(iommu->domain_ids, in domains_used_show()
3682 cap_ndoms(iommu->cap))); in domains_used_show()
3697 .name = "intel-iommu",
3725 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
3728 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
3744 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
3748 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
3781 pr_warn("Forcing Intel-IOMMU to enabled\n"); in tboot_force_iommu()
3793 struct intel_iommu *iommu; in intel_iommu_init() local
3796 * Intel IOMMU is required for a TXT/tboot launch or platform in intel_iommu_init()
3830 * We exit the function here to ensure IOMMU's remapping and in intel_iommu_init()
3831 * mempool aren't set up, which means that the IOMMU's PMRs in intel_iommu_init()
3838 for_each_iommu(iommu, drhd) in intel_iommu_init()
3839 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
3874 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
3880 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
3882 if (cap_caching_mode(iommu->cap) && in intel_iommu_init()
3884 pr_info_once("IOMMU batching disallowed due to virtualization\n"); in intel_iommu_init()
3887 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
3889 "%s", iommu->name); in intel_iommu_init()
3890 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
3892 iommu_pmu_register(iommu); in intel_iommu_init()
3904 for_each_iommu(iommu, drhd) { in intel_iommu_init()
3905 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
3906 iommu_enable_translation(iommu); in intel_iommu_init()
3908 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
3933 * NB - intel-iommu lacks any sort of reference counting for the users of
3953 struct intel_iommu *iommu = info->iommu; in dmar_remove_one_dev_info() local
3957 if (dev_is_pci(info->dev) && sm_supported(iommu)) in dmar_remove_one_dev_info()
3958 intel_pasid_tear_down_entry(iommu, info->dev, in dmar_remove_one_dev_info()
3969 domain_detach_iommu(domain, iommu); in dmar_remove_one_dev_info()
3981 struct intel_iommu *iommu = info->iommu; in device_block_translation() local
3986 if (sm_supported(iommu)) in device_block_translation()
3987 intel_pasid_tear_down_entry(iommu, dev, in device_block_translation()
4000 domain_detach_iommu(info->domain, iommu); in device_block_translation()
4088 struct intel_iommu *iommu; in prepare_domain_attach_device() local
4091 iommu = device_to_iommu(dev, NULL, NULL); in prepare_domain_attach_device()
4092 if (!iommu) in prepare_domain_attach_device()
4095 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in prepare_domain_attach_device()
4098 /* check if this iommu agaw is sufficient for max mapped address */ in prepare_domain_attach_device()
4099 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
4100 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
4101 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
4110 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
4162 pr_err("%s: iommu width (%d) is not " in intel_iommu_map()
4259 iommu_flush_iotlb_psi(info->iommu, dmar_domain, in intel_iommu_tlb_sync()
4291 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
4315 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in domain_set_force_snooping()
4352 return ecap_sc_support(info->iommu->ecap); in intel_iommu_capable()
4362 struct intel_iommu *iommu; in intel_iommu_probe_device() local
4366 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_probe_device()
4367 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
4381 info->segment = iommu->segment; in intel_iommu_probe_device()
4385 info->iommu = iommu; in intel_iommu_probe_device()
4387 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
4389 dmar_ats_supported(pdev, iommu)) { in intel_iommu_probe_device()
4394 * For IOMMU that supports device IOTLB throttling in intel_iommu_probe_device()
4396 * of a VF such that IOMMU HW can gauge queue depth in intel_iommu_probe_device()
4400 if (ecap_dit(iommu->ecap)) in intel_iommu_probe_device()
4404 if (sm_supported(iommu)) { in intel_iommu_probe_device()
4405 if (pasid_supported(iommu)) { in intel_iommu_probe_device()
4412 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
4420 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { in intel_iommu_probe_device()
4430 return &iommu->iommu; in intel_iommu_probe_device()
4519 struct intel_iommu *iommu; in intel_iommu_enable_sva() local
4524 iommu = info->iommu; in intel_iommu_enable_sva()
4525 if (!iommu) in intel_iommu_enable_sva()
4528 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
4536 * support PCI/PRI. The IOMMU side has no means to check the in intel_iommu_enable_sva()
4537 * capability of device-specific IOPF. Therefore, IOMMU can only in intel_iommu_enable_sva()
4555 struct intel_iommu *iommu; in intel_iommu_enable_iopf() local
4564 iommu = info->iommu; in intel_iommu_enable_iopf()
4565 if (!iommu) in intel_iommu_enable_iopf()
4576 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
4594 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
4602 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_iopf() local
4624 WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev)); in intel_iommu_disable_iopf()
4663 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
4669 * thus not be able to bypass the IOMMU restrictions.
4675 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", in risky_device()
4693 __mapping_notify_one(info->iommu, dmar_domain, pfn, pages); in intel_iommu_iotlb_sync_map()
4698 struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_remove_dev_pasid() local
4710 * notification. Before consolidating that code into iommu core, let in intel_iommu_remove_dev_pasid()
4730 domain_detach_iommu(dmar_domain, iommu); in intel_iommu_remove_dev_pasid()
4733 intel_pasid_tear_down_entry(iommu, dev, pasid, false); in intel_iommu_remove_dev_pasid()
4742 struct intel_iommu *iommu = info->iommu; in intel_iommu_set_dev_pasid() local
4747 if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) in intel_iommu_set_dev_pasid()
4750 if (context_copied(iommu, info->bus, info->devfn)) in intel_iommu_set_dev_pasid()
4761 ret = domain_attach_iommu(dmar_domain, iommu); in intel_iommu_set_dev_pasid()
4766 ret = intel_pasid_setup_pass_through(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4769 ret = domain_setup_first_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4772 ret = intel_pasid_setup_second_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4785 domain_detach_iommu(dmar_domain, iommu); in intel_iommu_set_dev_pasid()
4794 struct intel_iommu *iommu = info->iommu; in intel_iommu_hw_info() local
4801 vtd->cap_reg = iommu->cap; in intel_iommu_hw_info()
4802 vtd->ecap_reg = iommu->ecap; in intel_iommu_hw_info()
4845 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); in quirk_iommu_igfx()
4926 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); in quirk_calpella_no_shadow_gtt()
4955 pci_info(dev, "Skip IOMMU disabling for graphics\n"); in quirk_igfx_skip_te_disable()
5049 * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
5066 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
5069 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
5088 int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob) in ecmd_submit_sync() argument
5094 if (!cap_ecmds(iommu->cap)) in ecmd_submit_sync()
5097 raw_spin_lock_irqsave(&iommu->register_lock, flags); in ecmd_submit_sync()
5099 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG); in ecmd_submit_sync()
5112 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob); in ecmd_submit_sync()
5113 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT)); in ecmd_submit_sync()
5115 IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq, in ecmd_submit_sync()
5125 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in ecmd_submit_sync()