Lines Matching +full:smmu +full:- +full:v1
1 // SPDX-License-Identifier: GPL-2.0-only
3 * IOMMU API for ARM architected SMMU implementations.
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
14 * - Context fault reporting
15 * - Extended Stream ID (16 bit)
18 #define pr_fmt(fmt) "arm-smmu: " fmt
24 #include <linux/dma-mapping.h>
40 #include "arm-smmu.h"
41 #include "../../dma-iommu.h"
44 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
50 #define QCOM_DUMMY_VAL -1
58 …"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' f…
63 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
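/*
 * Editor's note (not part of the driver source): both module parameters
 * above can also be set on the kernel command line when the driver is
 * built in, e.g.
 *
 *     arm-smmu.force_stage=2 arm-smmu.disable_bypass=0
 *
 * which forces stage-2-only translation and lets transactions from
 * unmatched Stream IDs bypass the SMMU instead of being aborted, as the
 * parameter descriptions above explain.
 */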
71 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) in arm_smmu_rpm_get() argument
73 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_get()
74 return pm_runtime_resume_and_get(smmu->dev); in arm_smmu_rpm_get()
79 static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu) in arm_smmu_rpm_put() argument
81 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_put()
82 pm_runtime_put_autosuspend(smmu->dev); in arm_smmu_rpm_put()
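/*
 * Editor's note -- illustrative sketch, not taken from the driver: the two
 * helpers above bracket every register access in this file. A hypothetical
 * caller follows the same pattern:
 */
static int example_poke_smmu(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_rpm_get(smmu);	/* resume the SMMU if runtime PM is enabled */
	if (ret < 0)
		return ret;

	/* ... access SMMU registers here ... */

	arm_smmu_rpm_put(smmu);		/* drop the reference, allowing autosuspend */
	return 0;
}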
97 struct pci_bus *bus = to_pci_dev(dev)->bus; in dev_get_dev_node()
100 bus = bus->parent; in dev_get_dev_node()
101 return of_node_get(bus->bridge->parent->of_node); in dev_get_dev_node()
104 return of_node_get(dev->of_node); in dev_get_dev_node()
116 struct device_node *np = it->node; in __find_legacy_master_phandle()
119 of_for_each_phandle(it, err, dev->of_node, "mmu-masters", in __find_legacy_master_phandle()
120 "#stream-id-cells", -1) in __find_legacy_master_phandle()
121 if (it->node == np) { in __find_legacy_master_phandle()
125 it->node = np; in __find_legacy_master_phandle()
126 return err == -ENOENT ? 0 : err; in __find_legacy_master_phandle()
130 struct arm_smmu_device **smmu) in arm_smmu_register_legacy_master() argument
141 if (!np || !of_property_present(np, "#stream-id-cells")) { in arm_smmu_register_legacy_master()
143 return -ENODEV; in arm_smmu_register_legacy_master()
152 return -ENODEV; in arm_smmu_register_legacy_master()
157 /* "mmu-masters" assumes Stream ID == Requester ID */ in arm_smmu_register_legacy_master()
164 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode, in arm_smmu_register_legacy_master()
171 return -ENOMEM; in arm_smmu_register_legacy_master()
173 *smmu = dev_get_drvdata(smmu_dev); in arm_smmu_register_legacy_master()
181 struct arm_smmu_device **smmu) in arm_smmu_register_legacy_master() argument
183 return -ENODEV; in arm_smmu_register_legacy_master()
193 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page, in __arm_smmu_tlb_sync() argument
199 if (smmu->impl && unlikely(smmu->impl->tlb_sync)) in __arm_smmu_tlb_sync()
200 return smmu->impl->tlb_sync(smmu, page, sync, status); in __arm_smmu_tlb_sync()
202 arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL); in __arm_smmu_tlb_sync()
204 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { in __arm_smmu_tlb_sync()
205 reg = arm_smmu_readl(smmu, page, status); in __arm_smmu_tlb_sync()
212 dev_err_ratelimited(smmu->dev, in __arm_smmu_tlb_sync()
213 "TLB sync timed out -- SMMU may be deadlocked\n"); in __arm_smmu_tlb_sync()
216 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) in arm_smmu_tlb_sync_global() argument
220 spin_lock_irqsave(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
221 __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC, in arm_smmu_tlb_sync_global()
223 spin_unlock_irqrestore(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
228 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_sync_context() local
231 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
232 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx), in arm_smmu_tlb_sync_context()
234 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
245 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, in arm_smmu_tlb_inv_context_s1()
246 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid); in arm_smmu_tlb_inv_context_s1()
253 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context_s2() local
257 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_inv_context_s2()
258 arm_smmu_tlb_sync_global(smmu); in arm_smmu_tlb_inv_context_s2()
265 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s1() local
266 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_range_s1()
267 int idx = cfg->cbndx; in arm_smmu_tlb_inv_range_s1()
269 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s1()
272 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_tlb_inv_range_s1()
274 iova |= cfg->asid; in arm_smmu_tlb_inv_range_s1()
276 arm_smmu_cb_write(smmu, idx, reg, iova); in arm_smmu_tlb_inv_range_s1()
278 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
281 iova |= (u64)cfg->asid << 48; in arm_smmu_tlb_inv_range_s1()
283 arm_smmu_cb_writeq(smmu, idx, reg, iova); in arm_smmu_tlb_inv_range_s1()
285 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
293 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s2() local
294 int idx = smmu_domain->cfg.cbndx; in arm_smmu_tlb_inv_range_s2()
296 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s2()
301 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_tlb_inv_range_s2()
302 arm_smmu_cb_writeq(smmu, idx, reg, iova); in arm_smmu_tlb_inv_range_s2()
304 arm_smmu_cb_write(smmu, idx, reg, iova); in arm_smmu_tlb_inv_range_s2()
306 } while (size -= granule); in arm_smmu_tlb_inv_range_s2()
313 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_walk_s1()
315 if (cfg->flush_walk_prefer_tlbiasid) { in arm_smmu_tlb_inv_walk_s1()
354 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
357 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
365 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_add_page_s2_v1() local
367 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_add_page_s2_v1()
370 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_add_page_s2_v1()
397 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_context_fault() local
398 int idx = smmu_domain->cfg.cbndx; in arm_smmu_context_fault()
401 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); in arm_smmu_context_fault()
405 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); in arm_smmu_context_fault()
406 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); in arm_smmu_context_fault()
407 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx)); in arm_smmu_context_fault()
412 if (ret == -ENOSYS) in arm_smmu_context_fault()
413 dev_err_ratelimited(smmu->dev, in arm_smmu_context_fault()
417 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); in arm_smmu_context_fault()
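/*
 * Editor's note -- summary of the handler above, not driver code: on a
 * context interrupt the FSR is read first and the IRQ treated as spurious if
 * no fault bit is set; otherwise the syndrome (FSYNR0), faulting address
 * (FAR) and stream information (CBFRSYNRA) are gathered, the fault is passed
 * to the generic IOMMU fault reporting path, a rate-limited error is logged
 * if nothing handled it, and the FSR is finally written back to itself to
 * clear the fault.
 */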
424 struct arm_smmu_device *smmu = dev; in arm_smmu_global_fault() local
428 gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); in arm_smmu_global_fault()
429 gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0); in arm_smmu_global_fault()
430 gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1); in arm_smmu_global_fault()
431 gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2); in arm_smmu_global_fault()
439 dev_err(smmu->dev, in arm_smmu_global_fault()
440 …"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may h… in arm_smmu_global_fault()
443 dev_err(smmu->dev, in arm_smmu_global_fault()
445 dev_err(smmu->dev, in arm_smmu_global_fault()
450 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr); in arm_smmu_global_fault()
457 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_context_bank()
458 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; in arm_smmu_init_context_bank()
459 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_init_context_bank()
461 cb->cfg = cfg; in arm_smmu_init_context_bank()
465 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
466 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr; in arm_smmu_init_context_bank()
468 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg); in arm_smmu_init_context_bank()
469 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg); in arm_smmu_init_context_bank()
470 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_init_context_bank()
471 cb->tcr[1] |= ARM_SMMU_TCR2_AS; in arm_smmu_init_context_bank()
473 cb->tcr[0] |= ARM_SMMU_TCR_EAE; in arm_smmu_init_context_bank()
476 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg); in arm_smmu_init_context_bank()
481 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
482 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr; in arm_smmu_init_context_bank()
483 cb->ttbr[1] = 0; in arm_smmu_init_context_bank()
485 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, in arm_smmu_init_context_bank()
486 cfg->asid); in arm_smmu_init_context_bank()
487 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, in arm_smmu_init_context_bank()
488 cfg->asid); in arm_smmu_init_context_bank()
490 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) in arm_smmu_init_context_bank()
491 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_init_context_bank()
493 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_init_context_bank()
496 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; in arm_smmu_init_context_bank()
499 /* MAIRs (stage-1 only) */ in arm_smmu_init_context_bank()
501 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
502 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr; in arm_smmu_init_context_bank()
503 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr; in arm_smmu_init_context_bank()
505 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair; in arm_smmu_init_context_bank()
506 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32; in arm_smmu_init_context_bank()
511 void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_context_bank() argument
515 struct arm_smmu_cb *cb = &smmu->cbs[idx]; in arm_smmu_write_context_bank()
516 struct arm_smmu_cfg *cfg = cb->cfg; in arm_smmu_write_context_bank()
520 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0); in arm_smmu_write_context_bank()
524 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_write_context_bank()
527 if (smmu->version > ARM_SMMU_V1) { in arm_smmu_write_context_bank()
528 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_write_context_bank()
532 /* 16-bit VMIDs live in CBA2R */ in arm_smmu_write_context_bank()
533 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_write_context_bank()
534 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid); in arm_smmu_write_context_bank()
536 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg); in arm_smmu_write_context_bank()
540 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar); in arm_smmu_write_context_bank()
541 if (smmu->version < ARM_SMMU_V2) in arm_smmu_write_context_bank()
542 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx); in arm_smmu_write_context_bank()
553 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { in arm_smmu_write_context_bank()
554 /* 8-bit VMIDs live in CBAR */ in arm_smmu_write_context_bank()
555 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid); in arm_smmu_write_context_bank()
557 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg); in arm_smmu_write_context_bank()
564 if (stage1 && smmu->version > ARM_SMMU_V1) in arm_smmu_write_context_bank()
565 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]); in arm_smmu_write_context_bank()
566 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]); in arm_smmu_write_context_bank()
569 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_write_context_bank()
570 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid); in arm_smmu_write_context_bank()
571 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
572 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]); in arm_smmu_write_context_bank()
574 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
576 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1, in arm_smmu_write_context_bank()
577 cb->ttbr[1]); in arm_smmu_write_context_bank()
580 /* MAIRs (stage-1 only) */ in arm_smmu_write_context_bank()
582 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]); in arm_smmu_write_context_bank()
583 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]); in arm_smmu_write_context_bank()
594 if (smmu->impl && smmu->impl->write_sctlr) in arm_smmu_write_context_bank()
595 smmu->impl->write_sctlr(smmu, idx, reg); in arm_smmu_write_context_bank()
597 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg); in arm_smmu_write_context_bank()
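/*
 * Editor's note -- sketch, not from the source: several register writers in
 * this file (write_sctlr above, plus tlb_sync, write_s2cr and
 * alloc_context_bank elsewhere) first defer to an optional smmu->impl hook
 * so implementation-specific quirks can override the architected behaviour.
 * A hypothetical implementation would fill in only the hooks it needs:
 */
#if 0	/* illustrative only -- the example_* functions do not exist */
static const struct arm_smmu_impl example_impl = {
	.write_sctlr	= example_write_sctlr,
	.tlb_sync	= example_tlb_sync,
};
#endif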
601 struct arm_smmu_device *smmu, in arm_smmu_alloc_context_bank() argument
604 if (smmu->impl && smmu->impl->alloc_context_bank) in arm_smmu_alloc_context_bank()
605 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_alloc_context_bank()
607 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks); in arm_smmu_alloc_context_bank()
611 struct arm_smmu_device *smmu, in arm_smmu_init_domain_context() argument
620 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_domain_context()
623 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
624 if (smmu_domain->smmu) in arm_smmu_init_domain_context()
627 if (domain->type == IOMMU_DOMAIN_IDENTITY) { in arm_smmu_init_domain_context()
628 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; in arm_smmu_init_domain_context()
629 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
649 * Note that you can't actually request stage-2 mappings. in arm_smmu_init_domain_context()
651 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_init_domain_context()
652 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_init_domain_context()
653 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_init_domain_context()
654 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_init_domain_context()
659 * the decision into the io-pgtable code where it arguably belongs, in arm_smmu_init_domain_context()
664 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) in arm_smmu_init_domain_context()
665 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; in arm_smmu_init_domain_context()
668 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) && in arm_smmu_init_domain_context()
669 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)) in arm_smmu_init_domain_context()
670 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S; in arm_smmu_init_domain_context()
671 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) && in arm_smmu_init_domain_context()
672 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | in arm_smmu_init_domain_context()
675 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64; in arm_smmu_init_domain_context()
677 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) { in arm_smmu_init_domain_context()
678 ret = -EINVAL; in arm_smmu_init_domain_context()
682 switch (smmu_domain->stage) { in arm_smmu_init_domain_context()
684 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; in arm_smmu_init_domain_context()
685 start = smmu->num_s2_context_banks; in arm_smmu_init_domain_context()
686 ias = smmu->va_size; in arm_smmu_init_domain_context()
687 oas = smmu->ipa_size; in arm_smmu_init_domain_context()
688 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
690 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) { in arm_smmu_init_domain_context()
699 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops; in arm_smmu_init_domain_context()
707 cfg->cbar = CBAR_TYPE_S2_TRANS; in arm_smmu_init_domain_context()
709 ias = smmu->ipa_size; in arm_smmu_init_domain_context()
710 oas = smmu->pa_size; in arm_smmu_init_domain_context()
711 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
718 if (smmu->version == ARM_SMMU_V2) in arm_smmu_init_domain_context()
719 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2; in arm_smmu_init_domain_context()
721 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1; in arm_smmu_init_domain_context()
724 ret = -EINVAL; in arm_smmu_init_domain_context()
728 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_init_domain_context()
733 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
735 cfg->cbndx = ret; in arm_smmu_init_domain_context()
736 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_init_domain_context()
737 cfg->irptndx = atomic_inc_return(&smmu->irptndx); in arm_smmu_init_domain_context()
738 cfg->irptndx %= smmu->num_context_irqs; in arm_smmu_init_domain_context()
740 cfg->irptndx = cfg->cbndx; in arm_smmu_init_domain_context()
743 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) in arm_smmu_init_domain_context()
744 cfg->vmid = cfg->cbndx + 1; in arm_smmu_init_domain_context()
746 cfg->asid = cfg->cbndx; in arm_smmu_init_domain_context()
749 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_init_domain_context()
752 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, in arm_smmu_init_domain_context()
753 .tlb = smmu_domain->flush_ops, in arm_smmu_init_domain_context()
754 .iommu_dev = smmu->dev, in arm_smmu_init_domain_context()
757 if (smmu->impl && smmu->impl->init_context) { in arm_smmu_init_domain_context()
758 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev); in arm_smmu_init_domain_context()
763 if (smmu_domain->pgtbl_quirks) in arm_smmu_init_domain_context()
764 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks; in arm_smmu_init_domain_context()
768 ret = -ENOMEM; in arm_smmu_init_domain_context()
773 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_init_domain_context()
776 domain->geometry.aperture_start = ~0UL << ias; in arm_smmu_init_domain_context()
777 domain->geometry.aperture_end = ~0UL; in arm_smmu_init_domain_context()
779 domain->geometry.aperture_end = (1UL << ias) - 1; in arm_smmu_init_domain_context()
782 domain->geometry.force_aperture = true; in arm_smmu_init_domain_context()
786 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_init_domain_context()
790 * handler seeing a half-initialised domain state. in arm_smmu_init_domain_context()
792 irq = smmu->irqs[cfg->irptndx]; in arm_smmu_init_domain_context()
794 if (smmu->impl && smmu->impl->context_fault) in arm_smmu_init_domain_context()
795 context_fault = smmu->impl->context_fault; in arm_smmu_init_domain_context()
799 ret = devm_request_irq(smmu->dev, irq, context_fault, in arm_smmu_init_domain_context()
800 IRQF_SHARED, "arm-smmu-context-fault", domain); in arm_smmu_init_domain_context()
802 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", in arm_smmu_init_domain_context()
803 cfg->irptndx, irq); in arm_smmu_init_domain_context()
804 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX; in arm_smmu_init_domain_context()
807 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
810 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_init_domain_context()
814 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_init_domain_context()
815 smmu_domain->smmu = NULL; in arm_smmu_init_domain_context()
817 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
824 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_destroy_domain_context() local
825 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_destroy_domain_context()
828 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_destroy_domain_context()
831 ret = arm_smmu_rpm_get(smmu); in arm_smmu_destroy_domain_context()
839 smmu->cbs[cfg->cbndx].cfg = NULL; in arm_smmu_destroy_domain_context()
840 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_destroy_domain_context()
842 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) { in arm_smmu_destroy_domain_context()
843 irq = smmu->irqs[cfg->irptndx]; in arm_smmu_destroy_domain_context()
844 devm_free_irq(smmu->dev, irq, domain); in arm_smmu_destroy_domain_context()
847 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_destroy_domain_context()
848 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_destroy_domain_context()
850 arm_smmu_rpm_put(smmu); in arm_smmu_destroy_domain_context()
870 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
871 spin_lock_init(&smmu_domain->cb_lock); in arm_smmu_domain_alloc()
873 return &smmu_domain->domain; in arm_smmu_domain_alloc()
888 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_smr() argument
890 struct arm_smmu_smr *smr = smmu->smrs + idx; in arm_smmu_write_smr()
891 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) | in arm_smmu_write_smr()
892 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask); in arm_smmu_write_smr()
894 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) in arm_smmu_write_smr()
896 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg); in arm_smmu_write_smr()
899 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_s2cr() argument
901 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; in arm_smmu_write_s2cr()
904 if (smmu->impl && smmu->impl->write_s2cr) { in arm_smmu_write_s2cr()
905 smmu->impl->write_s2cr(smmu, idx); in arm_smmu_write_s2cr()
909 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) | in arm_smmu_write_s2cr()
910 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) | in arm_smmu_write_s2cr()
911 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); in arm_smmu_write_s2cr()
913 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && in arm_smmu_write_s2cr()
914 smmu->smrs[idx].valid) in arm_smmu_write_s2cr()
916 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg); in arm_smmu_write_s2cr()
919 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_sme() argument
921 arm_smmu_write_s2cr(smmu, idx); in arm_smmu_write_sme()
922 if (smmu->smrs) in arm_smmu_write_sme()
923 arm_smmu_write_smr(smmu, idx); in arm_smmu_write_sme()
930 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) in arm_smmu_test_smr_masks() argument
935 if (!smmu->smrs) in arm_smmu_test_smr_masks()
945 for (i = 0; i < smmu->num_mapping_groups; i++) in arm_smmu_test_smr_masks()
946 if (!smmu->smrs[i].valid) in arm_smmu_test_smr_masks()
955 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask); in arm_smmu_test_smr_masks()
956 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr); in arm_smmu_test_smr_masks()
957 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i)); in arm_smmu_test_smr_masks()
958 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr); in arm_smmu_test_smr_masks()
960 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask); in arm_smmu_test_smr_masks()
961 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr); in arm_smmu_test_smr_masks()
962 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i)); in arm_smmu_test_smr_masks()
963 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr); in arm_smmu_test_smr_masks()
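/*
 * Editor's note -- not driver code: the probing above exploits the fact that
 * only implemented SMR bits are read back as written. Writing an all-ones ID
 * field (and then an all-ones MASK field) to an unused SMR and reading it
 * back therefore reveals exactly which Stream ID and mask bits the hardware
 * implements, which is what populates smmu->streamid_mask and
 * smmu->smr_mask_mask.
 */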
966 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) in arm_smmu_find_sme() argument
968 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_find_sme()
969 int i, free_idx = -ENOSPC; in arm_smmu_find_sme()
976 for (i = 0; i < smmu->num_mapping_groups; ++i) { in arm_smmu_find_sme()
1002 return -EINVAL; in arm_smmu_find_sme()
1008 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx) in arm_smmu_free_sme() argument
1010 if (--smmu->s2crs[idx].count) in arm_smmu_free_sme()
1013 smmu->s2crs[idx] = s2cr_init_val; in arm_smmu_free_sme()
1014 if (smmu->smrs) in arm_smmu_free_sme()
1015 smmu->smrs[idx].valid = false; in arm_smmu_free_sme()
1024 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_alloc_smes() local
1025 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_master_alloc_smes()
1028 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1031 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); in arm_smmu_master_alloc_smes()
1032 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); in arm_smmu_master_alloc_smes()
1035 ret = -EEXIST; in arm_smmu_master_alloc_smes()
1039 ret = arm_smmu_find_sme(smmu, sid, mask); in arm_smmu_master_alloc_smes()
1044 if (smrs && smmu->s2crs[idx].count == 0) { in arm_smmu_master_alloc_smes()
1049 smmu->s2crs[idx].count++; in arm_smmu_master_alloc_smes()
1050 cfg->smendx[i] = (s16)idx; in arm_smmu_master_alloc_smes()
1055 arm_smmu_write_sme(smmu, idx); in arm_smmu_master_alloc_smes()
1057 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1061 while (i--) { in arm_smmu_master_alloc_smes()
1062 arm_smmu_free_sme(smmu, cfg->smendx[i]); in arm_smmu_master_alloc_smes()
1063 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_alloc_smes()
1065 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
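/*
 * Editor's note -- summary, not driver code: stream-map entries are
 * refcounted. For each fwspec ID the allocation path above either reuses a
 * compatible existing SMR (bumping the matching s2crs[].count) or claims a
 * free one, records the index in cfg->smendx[], and only writes the hardware
 * SMEs once every ID has been reserved; on failure the indices claimed so
 * far are released again under the same stream_map_mutex.
 */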
1072 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_free_smes() local
1075 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1077 if (arm_smmu_free_sme(smmu, idx)) in arm_smmu_master_free_smes()
1078 arm_smmu_write_sme(smmu, idx); in arm_smmu_master_free_smes()
1079 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_free_smes()
1081 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1088 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_add_master() local
1089 struct arm_smmu_s2cr *s2cr = smmu->s2crs; in arm_smmu_domain_add_master()
1090 u8 cbndx = smmu_domain->cfg.cbndx; in arm_smmu_domain_add_master()
1094 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) in arm_smmu_domain_add_master()
1106 arm_smmu_write_s2cr(smmu, idx); in arm_smmu_domain_add_master()
1116 struct arm_smmu_device *smmu; in arm_smmu_attach_dev() local
1119 if (!fwspec || fwspec->ops != &arm_smmu_ops) { in arm_smmu_attach_dev()
1120 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); in arm_smmu_attach_dev()
1121 return -ENXIO; in arm_smmu_attach_dev()
1126 * domains between of_xlate() and probe_device() - we have no way to cope in arm_smmu_attach_dev()
1133 return -ENODEV; in arm_smmu_attach_dev()
1135 smmu = cfg->smmu; in arm_smmu_attach_dev()
1137 ret = arm_smmu_rpm_get(smmu); in arm_smmu_attach_dev()
1142 ret = arm_smmu_init_domain_context(domain, smmu, dev); in arm_smmu_attach_dev()
1150 if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
1151 ret = -EINVAL; in arm_smmu_attach_dev()
1166 * to 5-10sec worth of reprogramming the context bank, while in arm_smmu_attach_dev()
1169 pm_runtime_set_autosuspend_delay(smmu->dev, 20); in arm_smmu_attach_dev()
1170 pm_runtime_use_autosuspend(smmu->dev); in arm_smmu_attach_dev()
1173 arm_smmu_rpm_put(smmu); in arm_smmu_attach_dev()
1181 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_map_pages()
1182 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_map_pages() local
1186 return -ENODEV; in arm_smmu_map_pages()
1188 arm_smmu_rpm_get(smmu); in arm_smmu_map_pages()
1189 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped); in arm_smmu_map_pages()
1190 arm_smmu_rpm_put(smmu); in arm_smmu_map_pages()
1199 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_unmap_pages()
1200 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_unmap_pages() local
1206 arm_smmu_rpm_get(smmu); in arm_smmu_unmap_pages()
1207 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather); in arm_smmu_unmap_pages()
1208 arm_smmu_rpm_put(smmu); in arm_smmu_unmap_pages()
1216 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_flush_iotlb_all() local
1218 if (smmu_domain->flush_ops) { in arm_smmu_flush_iotlb_all()
1219 arm_smmu_rpm_get(smmu); in arm_smmu_flush_iotlb_all()
1220 smmu_domain->flush_ops->tlb_flush_all(smmu_domain); in arm_smmu_flush_iotlb_all()
1221 arm_smmu_rpm_put(smmu); in arm_smmu_flush_iotlb_all()
1229 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iotlb_sync() local
1231 if (!smmu) in arm_smmu_iotlb_sync()
1234 arm_smmu_rpm_get(smmu); in arm_smmu_iotlb_sync()
1235 if (smmu->version == ARM_SMMU_V2 || in arm_smmu_iotlb_sync()
1236 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iotlb_sync()
1239 arm_smmu_tlb_sync_global(smmu); in arm_smmu_iotlb_sync()
1240 arm_smmu_rpm_put(smmu); in arm_smmu_iotlb_sync()
1247 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iova_to_phys_hard() local
1248 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_iova_to_phys_hard()
1249 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys_hard()
1250 struct device *dev = smmu->dev; in arm_smmu_iova_to_phys_hard()
1255 int ret, idx = cfg->cbndx; in arm_smmu_iova_to_phys_hard()
1258 ret = arm_smmu_rpm_get(smmu); in arm_smmu_iova_to_phys_hard()
1262 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1264 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_iova_to_phys_hard()
1265 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va); in arm_smmu_iova_to_phys_hard()
1267 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va); in arm_smmu_iova_to_phys_hard()
1269 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR; in arm_smmu_iova_to_phys_hard()
1272 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1276 arm_smmu_rpm_put(smmu); in arm_smmu_iova_to_phys_hard()
1277 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys_hard()
1280 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR); in arm_smmu_iova_to_phys_hard()
1281 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1290 arm_smmu_rpm_put(smmu); in arm_smmu_iova_to_phys_hard()
1299 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys()
1304 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && in arm_smmu_iova_to_phys()
1305 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iova_to_phys()
1308 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
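/*
 * Editor's note -- summary, not driver code: when the SMMU implements the
 * ATS1PR address-translation registers (ARM_SMMU_FEAT_TRANS_OPS) and the
 * domain is stage-1, iova_to_phys asks the hardware to perform the lookup
 * via arm_smmu_iova_to_phys_hard() above; otherwise, and also when the
 * hardware lookup times out, the io-pgtable code walks the tables in
 * software through ops->iova_to_phys().
 */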
1323 return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK || in arm_smmu_capable()
1344 struct arm_smmu_device *smmu = NULL; in arm_smmu_probe_device() local
1350 ret = arm_smmu_register_legacy_master(dev, &smmu); in arm_smmu_probe_device()
1353 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master() in arm_smmu_probe_device()
1360 } else if (fwspec && fwspec->ops == &arm_smmu_ops) { in arm_smmu_probe_device()
1361 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
1364 * Defer probe if the relevant SMMU instance hasn't finished in arm_smmu_probe_device()
1370 if (!smmu) in arm_smmu_probe_device()
1371 return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, in arm_smmu_probe_device()
1372 "smmu dev has not bound yet\n")); in arm_smmu_probe_device()
1374 return ERR_PTR(-ENODEV); in arm_smmu_probe_device()
1377 ret = -EINVAL; in arm_smmu_probe_device()
1378 for (i = 0; i < fwspec->num_ids; i++) { in arm_smmu_probe_device()
1379 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); in arm_smmu_probe_device()
1380 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); in arm_smmu_probe_device()
1382 if (sid & ~smmu->streamid_mask) { in arm_smmu_probe_device()
1383 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", in arm_smmu_probe_device()
1384 sid, smmu->streamid_mask); in arm_smmu_probe_device()
1387 if (mask & ~smmu->smr_mask_mask) { in arm_smmu_probe_device()
1388 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", in arm_smmu_probe_device()
1389 mask, smmu->smr_mask_mask); in arm_smmu_probe_device()
1394 ret = -ENOMEM; in arm_smmu_probe_device()
1400 cfg->smmu = smmu; in arm_smmu_probe_device()
1402 while (i--) in arm_smmu_probe_device()
1403 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_probe_device()
1405 ret = arm_smmu_rpm_get(smmu); in arm_smmu_probe_device()
1410 arm_smmu_rpm_put(smmu); in arm_smmu_probe_device()
1415 device_link_add(dev, smmu->dev, in arm_smmu_probe_device()
1418 return &smmu->iommu; in arm_smmu_probe_device()
1433 ret = arm_smmu_rpm_get(cfg->smmu); in arm_smmu_release_device()
1439 arm_smmu_rpm_put(cfg->smmu); in arm_smmu_release_device()
1448 struct arm_smmu_device *smmu; in arm_smmu_probe_finalize() local
1451 smmu = cfg->smmu; in arm_smmu_probe_finalize()
1453 if (smmu->impl && smmu->impl->probe_finalize) in arm_smmu_probe_finalize()
1454 smmu->impl->probe_finalize(smmu, dev); in arm_smmu_probe_finalize()
1461 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_device_group() local
1465 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1467 if (group && smmu->s2crs[idx].group && in arm_smmu_device_group()
1468 group != smmu->s2crs[idx].group) { in arm_smmu_device_group()
1469 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1470 return ERR_PTR(-EINVAL); in arm_smmu_device_group()
1473 group = smmu->s2crs[idx].group; in arm_smmu_device_group()
1477 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1491 smmu->s2crs[idx].group = group; in arm_smmu_device_group()
1493 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1502 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
1503 if (smmu_domain->smmu) in arm_smmu_enable_nesting()
1504 ret = -EPERM; in arm_smmu_enable_nesting()
1506 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; in arm_smmu_enable_nesting()
1507 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
1518 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_set_pgtable_quirks()
1519 if (smmu_domain->smmu) in arm_smmu_set_pgtable_quirks()
1520 ret = -EPERM; in arm_smmu_set_pgtable_quirks()
1522 smmu_domain->pgtbl_quirks = quirks; in arm_smmu_set_pgtable_quirks()
1523 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_set_pgtable_quirks()
1532 if (args->args_count > 0) in arm_smmu_of_xlate()
1533 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]); in arm_smmu_of_xlate()
1535 if (args->args_count > 1) in arm_smmu_of_xlate()
1536 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]); in arm_smmu_of_xlate()
1537 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) in arm_smmu_of_xlate()
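/*
 * Editor's note -- illustrative only: with the generic "iommus" binding the
 * first cell carries the Stream ID and the optional second cell an SMR mask,
 * which of_xlate() packs into a single fwspec ID as above. A hypothetical
 * consumer node might therefore contain:
 *
 *     iommus = <&smmu 0x400 0x3f>;	// SID 0x400, SMR mask 0x3f
 *
 * where "&smmu" is whatever label the SMMU node carries.
 */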
1554 list_add_tail(&region->list, head); in arm_smmu_get_resv_regions()
1562 const struct arm_smmu_impl *impl = cfg->smmu->impl; in arm_smmu_def_domain_type()
1567 if (impl && impl->def_domain_type) in arm_smmu_def_domain_type()
1568 return impl->def_domain_type(dev); in arm_smmu_def_domain_type()
1583 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1598 static void arm_smmu_device_reset(struct arm_smmu_device *smmu) in arm_smmu_device_reset() argument
1604 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR); in arm_smmu_device_reset()
1605 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg); in arm_smmu_device_reset()
1611 for (i = 0; i < smmu->num_mapping_groups; ++i) in arm_smmu_device_reset()
1612 arm_smmu_write_sme(smmu, i); in arm_smmu_device_reset()
1615 for (i = 0; i < smmu->num_context_banks; ++i) { in arm_smmu_device_reset()
1616 arm_smmu_write_context_bank(smmu, i); in arm_smmu_device_reset()
1617 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); in arm_smmu_device_reset()
1621 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL); in arm_smmu_device_reset()
1622 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL); in arm_smmu_device_reset()
1624 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0); in arm_smmu_device_reset()
1646 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_device_reset()
1649 if (smmu->features & ARM_SMMU_FEAT_EXIDS) in arm_smmu_device_reset()
1652 if (smmu->impl && smmu->impl->reset) in arm_smmu_device_reset()
1653 smmu->impl->reset(smmu); in arm_smmu_device_reset()
1656 arm_smmu_tlb_sync_global(smmu); in arm_smmu_device_reset()
1657 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg); in arm_smmu_device_reset()
1679 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) in arm_smmu_device_cfg_probe() argument
1683 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_cfg_probe()
1686 dev_notice(smmu->dev, "probing hardware configuration...\n"); in arm_smmu_device_cfg_probe()
1687 dev_notice(smmu->dev, "SMMUv%d with:\n", in arm_smmu_device_cfg_probe()
1688 smmu->version == ARM_SMMU_V2 ? 2 : 1); in arm_smmu_device_cfg_probe()
1691 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0); in arm_smmu_device_cfg_probe()
1700 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_cfg_probe()
1701 dev_notice(smmu->dev, "\tstage 1 translation\n"); in arm_smmu_device_cfg_probe()
1705 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_cfg_probe()
1706 dev_notice(smmu->dev, "\tstage 2 translation\n"); in arm_smmu_device_cfg_probe()
1710 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; in arm_smmu_device_cfg_probe()
1711 dev_notice(smmu->dev, "\tnested translation\n"); in arm_smmu_device_cfg_probe()
1714 if (!(smmu->features & in arm_smmu_device_cfg_probe()
1716 dev_err(smmu->dev, "\tno translation support!\n"); in arm_smmu_device_cfg_probe()
1717 return -ENODEV; in arm_smmu_device_cfg_probe()
1721 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) { in arm_smmu_device_cfg_probe()
1722 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; in arm_smmu_device_cfg_probe()
1723 dev_notice(smmu->dev, "\taddress translation ops\n"); in arm_smmu_device_cfg_probe()
1734 dev_notice(smmu->dev, "\t%scoherent table walk\n", in arm_smmu_device_cfg_probe()
1735 cttw_fw ? "" : "non-"); in arm_smmu_device_cfg_probe()
1737 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1741 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) { in arm_smmu_device_cfg_probe()
1742 smmu->features |= ARM_SMMU_FEAT_EXIDS; in arm_smmu_device_cfg_probe()
1747 smmu->streamid_mask = size - 1; in arm_smmu_device_cfg_probe()
1749 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; in arm_smmu_device_cfg_probe()
1752 dev_err(smmu->dev, in arm_smmu_device_cfg_probe()
1753 "stream-matching supported, but no SMRs present!\n"); in arm_smmu_device_cfg_probe()
1754 return -ENODEV; in arm_smmu_device_cfg_probe()
1757 /* Zero-initialised to mark as invalid */ in arm_smmu_device_cfg_probe()
1758 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), in arm_smmu_device_cfg_probe()
1760 if (!smmu->smrs) in arm_smmu_device_cfg_probe()
1761 return -ENOMEM; in arm_smmu_device_cfg_probe()
1763 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1766 /* s2cr->type == 0 means translation, so initialise explicitly */ in arm_smmu_device_cfg_probe()
1767 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), in arm_smmu_device_cfg_probe()
1769 if (!smmu->s2crs) in arm_smmu_device_cfg_probe()
1770 return -ENOMEM; in arm_smmu_device_cfg_probe()
1772 smmu->s2crs[i] = s2cr_init_val; in arm_smmu_device_cfg_probe()
1774 smmu->num_mapping_groups = size; in arm_smmu_device_cfg_probe()
1775 mutex_init(&smmu->stream_map_mutex); in arm_smmu_device_cfg_probe()
1776 spin_lock_init(&smmu->global_sync_lock); in arm_smmu_device_cfg_probe()
1778 if (smmu->version < ARM_SMMU_V2 || in arm_smmu_device_cfg_probe()
1780 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; in arm_smmu_device_cfg_probe()
1782 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S; in arm_smmu_device_cfg_probe()
1786 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1); in arm_smmu_device_cfg_probe()
1787 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12; in arm_smmu_device_cfg_probe()
1789 /* Check for size mismatch of SMMU address space from mapped region */ in arm_smmu_device_cfg_probe()
1791 if (smmu->numpage != 2 * size << smmu->pgshift) in arm_smmu_device_cfg_probe()
1792 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1793 "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n", in arm_smmu_device_cfg_probe()
1794 2 * size << smmu->pgshift, smmu->numpage); in arm_smmu_device_cfg_probe()
1796 smmu->numpage = size; in arm_smmu_device_cfg_probe()
1798 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id); in arm_smmu_device_cfg_probe()
1799 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id); in arm_smmu_device_cfg_probe()
1800 if (smmu->num_s2_context_banks > smmu->num_context_banks) { in arm_smmu_device_cfg_probe()
1801 dev_err(smmu->dev, "impossible number of S2 context banks!\n"); in arm_smmu_device_cfg_probe()
1802 return -ENODEV; in arm_smmu_device_cfg_probe()
1804 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", in arm_smmu_device_cfg_probe()
1805 smmu->num_context_banks, smmu->num_s2_context_banks); in arm_smmu_device_cfg_probe()
1806 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, in arm_smmu_device_cfg_probe()
1807 sizeof(*smmu->cbs), GFP_KERNEL); in arm_smmu_device_cfg_probe()
1808 if (!smmu->cbs) in arm_smmu_device_cfg_probe()
1809 return -ENOMEM; in arm_smmu_device_cfg_probe()
1812 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2); in arm_smmu_device_cfg_probe()
1814 smmu->ipa_size = size; in arm_smmu_device_cfg_probe()
1818 smmu->pa_size = size; in arm_smmu_device_cfg_probe()
1821 smmu->features |= ARM_SMMU_FEAT_VMID16; in arm_smmu_device_cfg_probe()
1828 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) in arm_smmu_device_cfg_probe()
1829 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1832 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_device_cfg_probe()
1833 smmu->va_size = smmu->ipa_size; in arm_smmu_device_cfg_probe()
1834 if (smmu->version == ARM_SMMU_V1_64K) in arm_smmu_device_cfg_probe()
1835 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1838 smmu->va_size = arm_smmu_id_size_to_bits(size); in arm_smmu_device_cfg_probe()
1840 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; in arm_smmu_device_cfg_probe()
1842 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K; in arm_smmu_device_cfg_probe()
1844 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1847 if (smmu->impl && smmu->impl->cfg_probe) { in arm_smmu_device_cfg_probe()
1848 ret = smmu->impl->cfg_probe(smmu); in arm_smmu_device_cfg_probe()
1854 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) in arm_smmu_device_cfg_probe()
1855 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M; in arm_smmu_device_cfg_probe()
1856 if (smmu->features & in arm_smmu_device_cfg_probe()
1858 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_cfg_probe()
1859 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K) in arm_smmu_device_cfg_probe()
1860 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_cfg_probe()
1861 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) in arm_smmu_device_cfg_probe()
1862 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_cfg_probe()
1864 if (arm_smmu_ops.pgsize_bitmap == -1UL) in arm_smmu_device_cfg_probe()
1865 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1867 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1868 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", in arm_smmu_device_cfg_probe()
1869 smmu->pgsize_bitmap); in arm_smmu_device_cfg_probe()
1872 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) in arm_smmu_device_cfg_probe()
1873 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", in arm_smmu_device_cfg_probe()
1874 smmu->va_size, smmu->ipa_size); in arm_smmu_device_cfg_probe()
1876 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) in arm_smmu_device_cfg_probe()
1877 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", in arm_smmu_device_cfg_probe()
1878 smmu->ipa_size, smmu->pa_size); in arm_smmu_device_cfg_probe()
1899 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1900 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1901 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1902 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1903 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1904 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1905 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1906 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1912 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) in acpi_smmu_get_data() argument
1919 smmu->version = ARM_SMMU_V1; in acpi_smmu_get_data()
1920 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1923 smmu->version = ARM_SMMU_V1_64K; in acpi_smmu_get_data()
1924 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1927 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1928 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1931 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1932 smmu->model = ARM_MMU500; in acpi_smmu_get_data()
1935 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1936 smmu->model = CAVIUM_SMMUV2; in acpi_smmu_get_data()
1939 ret = -ENODEV; in acpi_smmu_get_data()
1945 static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu, in arm_smmu_device_acpi_probe() argument
1948 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
1955 iort_smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_device_acpi_probe()
1957 ret = acpi_smmu_get_data(iort_smmu->model, smmu); in arm_smmu_device_acpi_probe()
1965 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) in arm_smmu_device_acpi_probe()
1966 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_acpi_probe()
1971 static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu, in arm_smmu_device_acpi_probe() argument
1974 return -ENODEV; in arm_smmu_device_acpi_probe()
1978 static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu, in arm_smmu_device_dt_probe() argument
1982 struct device *dev = smmu->dev; in arm_smmu_device_dt_probe()
1985 if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs)) in arm_smmu_device_dt_probe()
1986 return dev_err_probe(dev, -ENODEV, in arm_smmu_device_dt_probe()
1987 "missing #global-interrupts property\n"); in arm_smmu_device_dt_probe()
1991 smmu->version = data->version; in arm_smmu_device_dt_probe()
1992 smmu->model = data->model; in arm_smmu_device_dt_probe()
1994 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); in arm_smmu_device_dt_probe()
1997 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n", in arm_smmu_device_dt_probe()
1998 IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU"); in arm_smmu_device_dt_probe()
2005 return -ENODEV; in arm_smmu_device_dt_probe()
2008 if (of_dma_is_coherent(dev->of_node)) in arm_smmu_device_dt_probe()
2009 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_dt_probe()
2014 static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu) in arm_smmu_rmr_install_bypass_smr() argument
2022 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_smr()
2028 * SMMU until it gets enabled again in the reset routine. in arm_smmu_rmr_install_bypass_smr()
2030 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0); in arm_smmu_rmr_install_bypass_smr()
2032 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg); in arm_smmu_rmr_install_bypass_smr()
2039 for (i = 0; i < rmr->num_sids; i++) { in arm_smmu_rmr_install_bypass_smr()
2040 idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0); in arm_smmu_rmr_install_bypass_smr()
2044 if (smmu->s2crs[idx].count == 0) { in arm_smmu_rmr_install_bypass_smr()
2045 smmu->smrs[idx].id = rmr->sids[i]; in arm_smmu_rmr_install_bypass_smr()
2046 smmu->smrs[idx].mask = 0; in arm_smmu_rmr_install_bypass_smr()
2047 smmu->smrs[idx].valid = true; in arm_smmu_rmr_install_bypass_smr()
2049 smmu->s2crs[idx].count++; in arm_smmu_rmr_install_bypass_smr()
2050 smmu->s2crs[idx].type = S2CR_TYPE_BYPASS; in arm_smmu_rmr_install_bypass_smr()
2051 smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT; in arm_smmu_rmr_install_bypass_smr()
2057 dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt, in arm_smmu_rmr_install_bypass_smr()
2059 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_smr()
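/*
 * Editor's note -- summary, not driver code: for every stream ID named in an
 * IORT RMR node the loop above claims a stream-map entry, marks it valid and
 * points its S2CR at bypass, so DMA set up by firmware (a boot framebuffer,
 * for instance) keeps working across arm_smmu_device_reset(). The sCR0 write
 * beforehand simply disables the SMMU (CLIENTPD) until the reset routine
 * re-enables it, rather than trying to reconcile whatever the firmware left
 * programmed.
 */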
2065 struct arm_smmu_device *smmu; in arm_smmu_device_probe() local
2066 struct device *dev = &pdev->dev; in arm_smmu_device_probe()
2071 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); in arm_smmu_device_probe()
2072 if (!smmu) { in arm_smmu_device_probe()
2074 return -ENOMEM; in arm_smmu_device_probe()
2076 smmu->dev = dev; in arm_smmu_device_probe()
2078 if (dev->of_node) in arm_smmu_device_probe()
2079 err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs); in arm_smmu_device_probe()
2081 err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs); in arm_smmu_device_probe()
2085 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in arm_smmu_device_probe()
2086 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
2087 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
2088 smmu->ioaddr = res->start; in arm_smmu_device_probe()
2094 smmu->numpage = resource_size(res); in arm_smmu_device_probe()
2096 smmu = arm_smmu_impl_init(smmu); in arm_smmu_device_probe()
2097 if (IS_ERR(smmu)) in arm_smmu_device_probe()
2098 return PTR_ERR(smmu); in arm_smmu_device_probe()
2102 smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs; in arm_smmu_device_probe()
2103 if (smmu->num_context_irqs <= 0) in arm_smmu_device_probe()
2104 return dev_err_probe(dev, -ENODEV, in arm_smmu_device_probe()
2108 smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs, in arm_smmu_device_probe()
2109 sizeof(*smmu->irqs), GFP_KERNEL); in arm_smmu_device_probe()
2110 if (!smmu->irqs) in arm_smmu_device_probe()
2111 return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n", in arm_smmu_device_probe()
2112 smmu->num_context_irqs); in arm_smmu_device_probe()
2114 for (i = 0; i < smmu->num_context_irqs; i++) { in arm_smmu_device_probe()
2119 smmu->irqs[i] = irq; in arm_smmu_device_probe()
2122 err = devm_clk_bulk_get_all(dev, &smmu->clks); in arm_smmu_device_probe()
2127 smmu->num_clks = err; in arm_smmu_device_probe()
2129 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks); in arm_smmu_device_probe()
2133 err = arm_smmu_device_cfg_probe(smmu); in arm_smmu_device_probe()
2137 if (smmu->version == ARM_SMMU_V2) { in arm_smmu_device_probe()
2138 if (smmu->num_context_banks > smmu->num_context_irqs) { in arm_smmu_device_probe()
2141 smmu->num_context_irqs, smmu->num_context_banks); in arm_smmu_device_probe()
2142 return -ENODEV; in arm_smmu_device_probe()
2146 smmu->num_context_irqs = smmu->num_context_banks; in arm_smmu_device_probe()
2149 if (smmu->impl && smmu->impl->global_fault) in arm_smmu_device_probe()
2150 global_fault = smmu->impl->global_fault; in arm_smmu_device_probe()
2161 "arm-smmu global fault", smmu); in arm_smmu_device_probe()
2168 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, in arm_smmu_device_probe()
2169 "smmu.%pa", &smmu->ioaddr); in arm_smmu_device_probe()
2175 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); in arm_smmu_device_probe()
2178 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_probe()
2182 platform_set_drvdata(pdev, smmu); in arm_smmu_device_probe()
2185 arm_smmu_rmr_install_bypass_smr(smmu); in arm_smmu_device_probe()
2187 arm_smmu_device_reset(smmu); in arm_smmu_device_probe()
2188 arm_smmu_test_smr_masks(smmu); in arm_smmu_device_probe()
2191 * We want to avoid touching dev->power.lock in fastpaths unless in arm_smmu_device_probe()
2192 * it's really going to do something useful - pm_runtime_enabled() in arm_smmu_device_probe()
2196 if (dev->pm_domain) { in arm_smmu_device_probe()
2206 struct arm_smmu_device *smmu = platform_get_drvdata(pdev); in arm_smmu_device_shutdown() local
2208 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) in arm_smmu_device_shutdown()
2209 dev_notice(&pdev->dev, "disabling translation\n"); in arm_smmu_device_shutdown()
2211 arm_smmu_rpm_get(smmu); in arm_smmu_device_shutdown()
2213 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD); in arm_smmu_device_shutdown()
2214 arm_smmu_rpm_put(smmu); in arm_smmu_device_shutdown()
2216 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_device_shutdown()
2217 pm_runtime_force_suspend(smmu->dev); in arm_smmu_device_shutdown()
2219 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_device_shutdown()
2221 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_device_shutdown()
2226 struct arm_smmu_device *smmu = platform_get_drvdata(pdev); in arm_smmu_device_remove() local
2228 iommu_device_unregister(&smmu->iommu); in arm_smmu_device_remove()
2229 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_remove()
2236 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_runtime_resume() local
2239 ret = clk_bulk_enable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_resume()
2243 arm_smmu_device_reset(smmu); in arm_smmu_runtime_resume()
2250 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_runtime_suspend() local
2252 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_suspend()
2260 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_pm_resume() local
2262 ret = clk_bulk_prepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_resume()
2271 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_resume()
2279 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_pm_suspend() local
2289 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_suspend()
2301 .name = "arm-smmu",
2312 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2314 MODULE_ALIAS("platform:arm-smmu");