Lines Matching +full:mc +full:- +full:sid

1 // SPDX-License-Identifier: GPL-2.0-only
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
14 * - Context fault reporting
15 * - Extended Stream ID (16 bit)
18 #define pr_fmt(fmt) "arm-smmu: " fmt
24 #include <linux/dma-mapping.h>
38 #include <linux/fsl/mc.h>
40 #include "arm-smmu.h"
41 #include "../../dma-iommu.h"
50 #define QCOM_DUMMY_VAL -1
73 if (pm_runtime_enabled(smmu->dev))
74 return pm_runtime_resume_and_get(smmu->dev);
81 if (pm_runtime_enabled(smmu->dev))
82 pm_runtime_put_autosuspend(smmu->dev);
97 struct pci_bus *bus = to_pci_dev(dev)->bus;
100 bus = bus->parent;
101 return of_node_get(bus->bridge->parent->of_node);
104 return of_node_get(dev->of_node);
116 struct device_node *np = it->node;
119 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
120 "#stream-id-cells", -1)
121 if (it->node == np) {
125 it->node = np;
126 return err == -ENOENT ? 0 : err;
141 if (!np || !of_property_present(np, "#stream-id-cells")) {
143 return -ENODEV;
152 return -ENODEV;
157 /* "mmu-masters" assumes Stream ID == Requester ID */
164 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
171 return -ENOMEM;
183 return -ENODEV;
199 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
200 return smmu->impl->tlb_sync(smmu, page, sync, status);
204 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
212 dev_err_ratelimited(smmu->dev,
213 "TLB sync timed out -- SMMU may be deadlocked\n");
220 spin_lock_irqsave(&smmu->global_sync_lock, flags);
223 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
228 struct arm_smmu_device *smmu = smmu_domain->smmu;
231 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
232 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
234 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
245 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
246 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
253 struct arm_smmu_device *smmu = smmu_domain->smmu;
257 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
265 struct arm_smmu_device *smmu = smmu_domain->smmu;
266 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
267 int idx = cfg->cbndx;
269 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
272 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
274 iova |= cfg->asid;
278 } while (size -= granule);
281 iova |= (u64)cfg->asid << 48;
285 } while (size -= granule);
293 struct arm_smmu_device *smmu = smmu_domain->smmu;
294 int idx = smmu_domain->cfg.cbndx;
296 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
301 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
306 } while (size -= granule);
313 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
315 if (cfg->flush_walk_prefer_tlbiasid) {
354 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
357 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
365 struct arm_smmu_device *smmu = smmu_domain->smmu;
367 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
370 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
397 struct arm_smmu_device *smmu = smmu_domain->smmu;
398 int idx = smmu_domain->cfg.cbndx;
412 if (ret == -ENOSYS)
413 dev_err_ratelimited(smmu->dev,
439 dev_err(smmu->dev,
440 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
443 dev_err(smmu->dev,
445 dev_err(smmu->dev,
457 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
458 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
459 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
461 cb->cfg = cfg;
465 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
466 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
468 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
469 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
470 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
471 cb->tcr[1] |= ARM_SMMU_TCR2_AS;
473 cb->tcr[0] |= ARM_SMMU_TCR_EAE;
476 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
481 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
482 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
483 cb->ttbr[1] = 0;
485 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
486 cfg->asid);
487 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
488 cfg->asid);
490 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
491 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
493 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
496 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
499 /* MAIRs (stage-1 only) */
501 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
502 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
503 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
505 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
506 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
515 struct arm_smmu_cb *cb = &smmu->cbs[idx];
516 struct arm_smmu_cfg *cfg = cb->cfg;
524 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
527 if (smmu->version > ARM_SMMU_V1) {
528 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
532 /* 16-bit VMIDs live in CBA2R */
533 if (smmu->features & ARM_SMMU_FEAT_VMID16)
534 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
540 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
541 if (smmu->version < ARM_SMMU_V2)
542 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
553 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
554 /* 8-bit VMIDs live in CBAR */
555 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
564 if (stage1 && smmu->version > ARM_SMMU_V1)
565 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
566 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
569 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
570 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
571 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
572 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
574 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
577 cb->ttbr[1]);
580 /* MAIRs (stage-1 only) */
582 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
583 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
594 if (smmu->impl && smmu->impl->write_sctlr)
595 smmu->impl->write_sctlr(smmu, idx, reg);
604 if (smmu->impl && smmu->impl->alloc_context_bank)
605 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
607 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
620 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
623 mutex_lock(&smmu_domain->init_mutex);
624 if (smmu_domain->smmu)
627 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
628 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
629 smmu_domain->smmu = smmu;
649 * Note that you can't actually request stage-2 mappings.
651 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
652 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
653 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
654 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
659 * the decision into the io-pgtable code where it arguably belongs,
664 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
665 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
668 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
669 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
670 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
671 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
672 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
675 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
677 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
678 ret = -EINVAL;
682 switch (smmu_domain->stage) {
684 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
685 start = smmu->num_s2_context_banks;
686 ias = smmu->va_size;
687 oas = smmu->ipa_size;
688 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
690 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
699 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
707 cfg->cbar = CBAR_TYPE_S2_TRANS;
709 ias = smmu->ipa_size;
710 oas = smmu->pa_size;
711 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
718 if (smmu->version == ARM_SMMU_V2)
719 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
721 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
724 ret = -EINVAL;
733 smmu_domain->smmu = smmu;
735 cfg->cbndx = ret;
736 if (smmu->version < ARM_SMMU_V2) {
737 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
738 cfg->irptndx %= smmu->num_context_irqs;
740 cfg->irptndx = cfg->cbndx;
743 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
744 cfg->vmid = cfg->cbndx + 1;
746 cfg->asid = cfg->cbndx;
749 .pgsize_bitmap = smmu->pgsize_bitmap,
752 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
753 .tlb = smmu_domain->flush_ops,
754 .iommu_dev = smmu->dev,
757 if (smmu->impl && smmu->impl->init_context) {
758 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
763 if (smmu_domain->pgtbl_quirks)
764 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
768 ret = -ENOMEM;
773 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
776 domain->geometry.aperture_start = ~0UL << ias;
777 domain->geometry.aperture_end = ~0UL;
779 domain->geometry.aperture_end = (1UL << ias) - 1;
782 domain->geometry.force_aperture = true;
786 arm_smmu_write_context_bank(smmu, cfg->cbndx);
790 * handler seeing a half-initialised domain state.
792 irq = smmu->irqs[cfg->irptndx];
794 if (smmu->impl && smmu->impl->context_fault)
795 context_fault = smmu->impl->context_fault;
799 ret = devm_request_irq(smmu->dev, irq, context_fault,
800 IRQF_SHARED, "arm-smmu-context-fault", domain);
802 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
803 cfg->irptndx, irq);
804 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
807 mutex_unlock(&smmu_domain->init_mutex);
810 smmu_domain->pgtbl_ops = pgtbl_ops;
814 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
815 smmu_domain->smmu = NULL;
817 mutex_unlock(&smmu_domain->init_mutex);
824 struct arm_smmu_device *smmu = smmu_domain->smmu;
825 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
828 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
839 smmu->cbs[cfg->cbndx].cfg = NULL;
840 arm_smmu_write_context_bank(smmu, cfg->cbndx);
842 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
843 irq = smmu->irqs[cfg->irptndx];
844 devm_free_irq(smmu->dev, irq, domain);
847 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
848 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
870 mutex_init(&smmu_domain->init_mutex);
871 spin_lock_init(&smmu_domain->cb_lock);
873 return &smmu_domain->domain;
890 struct arm_smmu_smr *smr = smmu->smrs + idx;
891 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
892 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
894 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
901 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
904 if (smmu->impl && smmu->impl->write_s2cr) {
905 smmu->impl->write_s2cr(smmu, idx);
909 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
910 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
911 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
913 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
914 smmu->smrs[idx].valid)
922 if (smmu->smrs)
935 if (!smmu->smrs)
945 for (i = 0; i < smmu->num_mapping_groups; i++)
946 if (!smmu->smrs[i].valid)
955 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
958 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
960 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
963 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
968 struct arm_smmu_smr *smrs = smmu->smrs;
969 int i, free_idx = -ENOSPC;
976 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1002 return -EINVAL;
1010 if (--smmu->s2crs[idx].count)
1013 smmu->s2crs[idx] = s2cr_init_val;
1014 if (smmu->smrs)
1015 smmu->smrs[idx].valid = false;
1024 struct arm_smmu_device *smmu = cfg->smmu;
1025 struct arm_smmu_smr *smrs = smmu->smrs;
1028 mutex_lock(&smmu->stream_map_mutex);
1031 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1032 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1035 ret = -EEXIST;
1039 ret = arm_smmu_find_sme(smmu, sid, mask);
1044 if (smrs && smmu->s2crs[idx].count == 0) {
1045 smrs[idx].id = sid;
1049 smmu->s2crs[idx].count++;
1050 cfg->smendx[i] = (s16)idx;
1057 mutex_unlock(&smmu->stream_map_mutex);
1061 while (i--) {
1062 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1063 cfg->smendx[i] = INVALID_SMENDX;
1065 mutex_unlock(&smmu->stream_map_mutex);
1072 struct arm_smmu_device *smmu = cfg->smmu;
1075 mutex_lock(&smmu->stream_map_mutex);
1079 cfg->smendx[i] = INVALID_SMENDX;
1081 mutex_unlock(&smmu->stream_map_mutex);
1088 struct arm_smmu_device *smmu = smmu_domain->smmu;
1089 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1090 u8 cbndx = smmu_domain->cfg.cbndx;
1094 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1119 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
1121 return -ENXIO;
1126 * domains between of_xlate() and probe_device() - we have no way to cope
1133 return -ENODEV;
1135 smmu = cfg->smmu;
1150 if (smmu_domain->smmu != smmu) {
1151 ret = -EINVAL;
1166 * to 5-10sec worth of reprogramming the context bank, while
1169 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
1170 pm_runtime_use_autosuspend(smmu->dev);
1181 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1182 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1186 return -ENODEV;
1189 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
1199 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1200 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1207 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
1216 struct arm_smmu_device *smmu = smmu_domain->smmu;
1218 if (smmu_domain->flush_ops) {
1220 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1229 struct arm_smmu_device *smmu = smmu_domain->smmu;
1235 if (smmu->version == ARM_SMMU_V2 ||
1236 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1247 struct arm_smmu_device *smmu = smmu_domain->smmu;
1248 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1249 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1250 struct device *dev = smmu->dev;
1255 int ret, idx = cfg->cbndx;
1262 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1264 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1272 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1277 return ops->iova_to_phys(ops, iova);
1281 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1299 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1304 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1305 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1308 return ops->iova_to_phys(ops, iova);
1323 return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
1353 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1360 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
1361 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1371 return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER,
1374 return ERR_PTR(-ENODEV);
1377 ret = -EINVAL;
1378 for (i = 0; i < fwspec->num_ids; i++) {
1379 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1380 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1382 if (sid & ~smmu->streamid_mask) {
1384 sid, smmu->streamid_mask);
1387 if (mask & ~smmu->smr_mask_mask) {
1389 mask, smmu->smr_mask_mask);
1394 ret = -ENOMEM;
1400 cfg->smmu = smmu;
1402 while (i--)
1403 cfg->smendx[i] = INVALID_SMENDX;
1415 device_link_add(dev, smmu->dev,
1418 return &smmu->iommu;
1433 ret = arm_smmu_rpm_get(cfg->smmu);
1439 arm_smmu_rpm_put(cfg->smmu);
1451 smmu = cfg->smmu;
1453 if (smmu->impl && smmu->impl->probe_finalize)
1454 smmu->impl->probe_finalize(smmu, dev);
1461 struct arm_smmu_device *smmu = cfg->smmu;
1465 mutex_lock(&smmu->stream_map_mutex);
1467 if (group && smmu->s2crs[idx].group &&
1468 group != smmu->s2crs[idx].group) {
1469 mutex_unlock(&smmu->stream_map_mutex);
1470 return ERR_PTR(-EINVAL);
1473 group = smmu->s2crs[idx].group;
1477 mutex_unlock(&smmu->stream_map_mutex);
1491 smmu->s2crs[idx].group = group;
1493 mutex_unlock(&smmu->stream_map_mutex);
1502 mutex_lock(&smmu_domain->init_mutex);
1503 if (smmu_domain->smmu)
1504 ret = -EPERM;
1506 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1507 mutex_unlock(&smmu_domain->init_mutex);
1518 mutex_lock(&smmu_domain->init_mutex);
1519 if (smmu_domain->smmu)
1520 ret = -EPERM;
1522 smmu_domain->pgtbl_quirks = quirks;
1523 mutex_unlock(&smmu_domain->init_mutex);
1532 if (args->args_count > 0)
1533 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1535 if (args->args_count > 1)
1536 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1537 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1554 list_add_tail(&region->list, head);
1562 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1567 if (impl && impl->def_domain_type)
1568 return impl->def_domain_type(dev);
1583 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1611 for (i = 0; i < smmu->num_mapping_groups; ++i)
1615 for (i = 0; i < smmu->num_context_banks; ++i) {
1646 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1649 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1652 if (smmu->impl && smmu->impl->reset)
1653 smmu->impl->reset(smmu);
1683 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1686 dev_notice(smmu->dev, "probing hardware configuration...\n");
1687 dev_notice(smmu->dev, "SMMUv%d with:\n",
1688 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1700 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1701 dev_notice(smmu->dev, "\tstage 1 translation\n");
1705 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1706 dev_notice(smmu->dev, "\tstage 2 translation\n");
1710 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1711 dev_notice(smmu->dev, "\tnested translation\n");
1714 if (!(smmu->features &
1716 dev_err(smmu->dev, "\tno translation support!\n");
1717 return -ENODEV;
1721 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1722 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1723 dev_notice(smmu->dev, "\taddress translation ops\n");
1734 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1735 cttw_fw ? "" : "non-");
1737 dev_notice(smmu->dev,
1741 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1742 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1747 smmu->streamid_mask = size - 1;
1749 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1752 dev_err(smmu->dev,
1753 "stream-matching supported, but no SMRs present!\n");
1754 return -ENODEV;
1757 /* Zero-initialised to mark as invalid */
1758 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1760 if (!smmu->smrs)
1761 return -ENOMEM;
1763 dev_notice(smmu->dev,
1766 /* s2cr->type == 0 means translation, so initialise explicitly */
1767 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1769 if (!smmu->s2crs)
1770 return -ENOMEM;
1772 smmu->s2crs[i] = s2cr_init_val;
1774 smmu->num_mapping_groups = size;
1775 mutex_init(&smmu->stream_map_mutex);
1776 spin_lock_init(&smmu->global_sync_lock);
1778 if (smmu->version < ARM_SMMU_V2 ||
1780 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1782 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1787 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1791 if (smmu->numpage != 2 * size << smmu->pgshift)
1792 dev_warn(smmu->dev,
1794 2 * size << smmu->pgshift, smmu->numpage);
1796 smmu->numpage = size;
1798 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1799 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1800 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1801 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1802 return -ENODEV;
1804 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1805 smmu->num_context_banks, smmu->num_s2_context_banks);
1806 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1807 sizeof(*smmu->cbs), GFP_KERNEL);
1808 if (!smmu->cbs)
1809 return -ENOMEM;
1814 smmu->ipa_size = size;
1818 smmu->pa_size = size;
1821 smmu->features |= ARM_SMMU_FEAT_VMID16;
1828 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1829 dev_warn(smmu->dev,
1832 if (smmu->version < ARM_SMMU_V2) {
1833 smmu->va_size = smmu->ipa_size;
1834 if (smmu->version == ARM_SMMU_V1_64K)
1835 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1838 smmu->va_size = arm_smmu_id_size_to_bits(size);
1840 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1842 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1844 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1847 if (smmu->impl && smmu->impl->cfg_probe) {
1848 ret = smmu->impl->cfg_probe(smmu);
1854 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1855 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1856 if (smmu->features &
1858 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1859 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1860 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1861 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1862 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1864 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1865 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1867 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1868 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1869 smmu->pgsize_bitmap);
1872 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1873 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1874 smmu->va_size, smmu->ipa_size);
1876 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1877 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1878 smmu->ipa_size, smmu->pa_size);
1899 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1900 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1901 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1902 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1903 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1904 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1905 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1906 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1919 smmu->version = ARM_SMMU_V1;
1920 smmu->model = GENERIC_SMMU;
1923 smmu->version = ARM_SMMU_V1_64K;
1924 smmu->model = GENERIC_SMMU;
1927 smmu->version = ARM_SMMU_V2;
1928 smmu->model = GENERIC_SMMU;
1931 smmu->version = ARM_SMMU_V2;
1932 smmu->model = ARM_MMU500;
1935 smmu->version = ARM_SMMU_V2;
1936 smmu->model = CAVIUM_SMMUV2;
1939 ret = -ENODEV;
1948 struct device *dev = smmu->dev;
1955 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
1957 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1965 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
1966 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1974 return -ENODEV;
1982 struct device *dev = smmu->dev;
1985 if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
1986 return dev_err_probe(dev, -ENODEV,
1987 "missing #global-interrupts property\n");
1991 smmu->version = data->version;
1992 smmu->model = data->model;
1994 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
1997 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2005 return -ENODEV;
2008 if (of_dma_is_coherent(dev->of_node))
2009 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2022 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2039 for (i = 0; i < rmr->num_sids; i++) {
2040 idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
2044 if (smmu->s2crs[idx].count == 0) {
2045 smmu->smrs[idx].id = rmr->sids[i];
2046 smmu->smrs[idx].mask = 0;
2047 smmu->smrs[idx].valid = true;
2049 smmu->s2crs[idx].count++;
2050 smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
2051 smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2057 dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
2059 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2066 struct device *dev = &pdev->dev;
2074 return -ENOMEM;
2076 smmu->dev = dev;
2078 if (dev->of_node)
2085 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2086 if (IS_ERR(smmu->base))
2087 return PTR_ERR(smmu->base);
2088 smmu->ioaddr = res->start;
2094 smmu->numpage = resource_size(res);
2102 smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
2103 if (smmu->num_context_irqs <= 0)
2104 return dev_err_probe(dev, -ENODEV,
2108 smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
2109 sizeof(*smmu->irqs), GFP_KERNEL);
2110 if (!smmu->irqs)
2111 return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
2112 smmu->num_context_irqs);
2114 for (i = 0; i < smmu->num_context_irqs; i++) {
2119 smmu->irqs[i] = irq;
2122 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2127 smmu->num_clks = err;
2129 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2137 if (smmu->version == ARM_SMMU_V2) {
2138 if (smmu->num_context_banks > smmu->num_context_irqs) {
2141 smmu->num_context_irqs, smmu->num_context_banks);
2142 return -ENODEV;
2146 smmu->num_context_irqs = smmu->num_context_banks;
2149 if (smmu->impl && smmu->impl->global_fault)
2150 global_fault = smmu->impl->global_fault;
2161 "arm-smmu global fault", smmu);
2168 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2169 "smmu.%pa", &smmu->ioaddr);
2175 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
2178 iommu_device_sysfs_remove(&smmu->iommu);
2191 * We want to avoid touching dev->power.lock in fastpaths unless
2192 * it's really going to do something useful - pm_runtime_enabled()
2196 if (dev->pm_domain) {
2208 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2209 dev_notice(&pdev->dev, "disabling translation\n");
2216 if (pm_runtime_enabled(smmu->dev))
2217 pm_runtime_force_suspend(smmu->dev);
2219 clk_bulk_disable(smmu->num_clks, smmu->clks);
2221 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2228 iommu_device_unregister(&smmu->iommu);
2229 iommu_device_sysfs_remove(&smmu->iommu);
2239 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2252 clk_bulk_disable(smmu->num_clks, smmu->clks);
2262 ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
2271 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2289 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2301 .name = "arm-smmu",
2314 MODULE_ALIAS("platform:arm-smmu");