Lines Matching +full:secure +full:- +full:reg +full:- +full:access
1 // SPDX-License-Identifier: GPL-2.0-only
5 #define pr_fmt(fmt) "arm-smmu: " fmt
10 #include "arm-smmu.h"
44 /* Since we don't care for sGFAR, we can do without 64-bit accessors */
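The comment above about doing without 64-bit accessors belongs to the Calxeda "secure config access" quirk rather than to the Cavium code that follows in this listing: on those MMU-400 integrations the driver must use the Non-Secure aliases of the banked Secure global registers, and because sGFAR (the only 64-bit register in that group) is never read, 32-bit read_reg/write_reg hooks suffice. A sketch of what such accessors look like; the helper and offset names are assumed from arm-smmu.h and the upstream driver rather than shown in this listing.

/*
 * Sketch: redirect GR0 accesses to the banked Secure registers onto their
 * Non-Secure aliases 0x400 bytes above. Offset and helper names assumed
 * from arm-smmu.h.
 */
static int arm_smmu_gr0_ns(int offset)
{
        switch (offset) {
        case ARM_SMMU_GR0_sCR0:
        case ARM_SMMU_GR0_sACR:
        case ARM_SMMU_GR0_sGFSR:
        case ARM_SMMU_GR0_sGFSYNR0:
        case ARM_SMMU_GR0_sGFSYNR1:
        case ARM_SMMU_GR0_sGFSYNR2:
                return offset + 0x400;
        default:
                return offset;
        }
}

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page, int offset)
{
        if (page == ARM_SMMU_GR0)
                offset = arm_smmu_gr0_ns(offset);
        return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
                              int offset, u32 val)
{
        if (page == ARM_SMMU_GR0)
                offset = arm_smmu_gr0_ns(offset);
        writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

static const struct arm_smmu_impl calxeda_impl = {
        .read_reg       = arm_smmu_read_ns,
        .write_reg      = arm_smmu_write_ns,
};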
65 cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count); in cavium_cfg_probe()
66 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); in cavium_cfg_probe()
74 struct cavium_smmu *cs = container_of(smmu_domain->smmu, in cavium_init_context()
77 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) in cavium_init_context()
78 smmu_domain->cfg.vmid += cs->id_base; in cavium_init_context()
80 smmu_domain->cfg.asid += cs->id_base; in cavium_init_context()
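The Cavium fragments above lose their surrounding structure in this listing (the else between the two increments is elided, for instance). The workaround for Cavium CN88xx erratum 27704 is to make ASID and VMID allocation unique across all SMMUs in the system: each instance reserves a block of IDs from a driver-global counter at probe time, then offsets every domain's VMID (stage 2) or ASID (stage 1) into that block. A sketch of the two hooks together; the wrapper struct and the init_context parameters not shown in the listing are assumptions based on the current hook prototypes.

/* Assumed wrapper: the generic device plus the per-instance ID offset. */
struct cavium_smmu {
        struct arm_smmu_device smmu;
        u32 id_base;
};

static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
        static atomic_t context_count = ATOMIC_INIT(0);
        struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);

        /* Reserve num_context_banks IDs so no two SMMUs hand out the same one. */
        cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
        dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
        return 0;
}

static int cavium_init_context(struct arm_smmu_domain *smmu_domain,
                               struct io_pgtable_cfg *pgtbl_cfg,
                               struct device *dev)
{
        struct cavium_smmu *cs = container_of(smmu_domain->smmu,
                                              struct cavium_smmu, smmu);

        /* Stage 2 domains are identified by VMID, stage 1 domains by ASID. */
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
                smmu_domain->cfg.vmid += cs->id_base;
        else
                smmu_domain->cfg.asid += cs->id_base;
        return 0;
}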
94 cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL); in cavium_smmu_impl_init()
96 return ERR_PTR(-ENOMEM); in cavium_smmu_impl_init()
98 cs->smmu.impl = &cavium_impl; in cavium_smmu_impl_init()
100 return &cs->smmu; in cavium_smmu_impl_init()
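The init helper above also illustrates how per-implementation state gets attached: devm_krealloc() resizes the devres-managed arm_smmu_device allocation into the larger wrapper whose first member is the generic device, and the caller continues with the returned pointer, so container_of() can recover id_base later without a separate allocation. A sketch of the descriptor and the helper with the elided NULL check filled in; hook names are assumed from struct arm_smmu_impl in arm-smmu.h.

static const struct arm_smmu_impl cavium_impl = {
        .cfg_probe      = cavium_cfg_probe,
        .init_context   = cavium_init_context,
};

static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
        struct cavium_smmu *cs;

        /*
         * Resize the existing devres allocation rather than allocating a
         * new one; the allocation may move, so only the returned pointer
         * is used from here on.
         */
        cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);

        cs->smmu.impl = &cavium_impl;
        return &cs->smmu;
}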
112 u32 reg, major; in arm_mmu500_reset() local
115 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before in arm_mmu500_reset()
117 * Secure has also cleared SACR.CACHE_LOCK for this to take effect... in arm_mmu500_reset()
119 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7); in arm_mmu500_reset()
120 major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg); in arm_mmu500_reset()
121 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR); in arm_mmu500_reset()
123 reg &= ~ARM_MMU500_ACR_CACHE_LOCK; in arm_mmu500_reset()
128 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN; in arm_mmu500_reset()
129 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg); in arm_mmu500_reset()
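The listing elides two things in the sequence above: the version guard that makes the CACHE_LOCK clear conditional (the ID7 MAJOR field gives the major revision, and per the comment the clear only matters from r2p0 onwards, i.e. major >= 2), and a remark that the two TLBEN bits let unmatched StreamIDs and S2CR bypass entries be cached in the TLB for lower latency. A sketch of how the shown statements presumably fit together, with the guard reconstructed as an assumption from the r2p0 comment.

/* Fragment of arm_mmu500_reset(); reg and major are the locals declared above. */
reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);

reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
if (major >= 2)                         /* r2p0 and later */
        reg &= ~ARM_MMU500_ACR_CACHE_LOCK;

/* Cache unmatched StreamID and S2CR bypass entries in the TLB. */
reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);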
132 * Disable MMU-500's not-particularly-beneficial next-page in arm_mmu500_reset()
135 for (i = 0; i < smmu->num_context_banks; ++i) { in arm_mmu500_reset()
136 reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); in arm_mmu500_reset()
137 reg &= ~ARM_MMU500_ACTLR_CPRE; in arm_mmu500_reset()
138 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg); in arm_mmu500_reset()
139 reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); in arm_mmu500_reset()
140 if (reg & ARM_MMU500_ACTLR_CPRE) in arm_mmu500_reset()
141 dev_warn_once(smmu->dev, "Failed to disable prefetcher for errata workarounds, check SACR.CACHE_LOCK\n"); in arm_mmu500_reset()
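The read-back in the loop above is the point of the earlier CACHE_LOCK handling: while SACR.CACHE_LOCK is still set on the Secure side, writes to the context-bank ACTLRs are silently ignored, so the only way to confirm the prefetcher is really off is to read the CPRE bit back and warn once if it stuck. arm_mmu500_reset() itself is non-static and declared in arm-smmu.h so SoC-specific MMU-500 integrations can reuse it from their own impls; the plain MMU-500 case needs nothing beyond this one hook, roughly as sketched below.

/* Generic MMU-500: only the reset-time quirks above are required. */
static const struct arm_smmu_impl arm_mmu500_impl = {
        .reset = arm_mmu500_reset,
};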
154 * Marvell Armada-AP806 erratum #582743. in mrvl_mmu500_readq()
164 * Marvell Armada-AP806 erratum #582743. in mrvl_mmu500_writeq()
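Armada-AP806 erratum #582743 means 64-bit register transactions towards the SMMU cannot be used on this interconnect, so the two 64-bit accessor hooks are overridden to issue a pair of 32-bit beats instead. A sketch of one way to do that with the hi_lo helpers from <linux/io-64-nonatomic-hi-lo.h>, which access the upper word first and then the lower; the choice of these particular helpers is an assumption, since the listing only shows the erratum comments.

#include <linux/io-64-nonatomic-hi-lo.h>

/* Sketch: split every 64-bit SMMU register access into two 32-bit beats. */
static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
{
        return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
}

static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
                               u64 val)
{
        hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
}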
174 * Armada-AP806 erratum #582743. in mrvl_mmu500_cfg_probe()
176 * formats altogether and allow using 32 bits access on the in mrvl_mmu500_cfg_probe()
179 smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K | in mrvl_mmu500_cfg_probe()
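The feature mask above is cut off after the first flag. Per the comment, hiding the SMMU_IDR2 PTFSv8 formats keeps the driver on the AArch32 page-table formats so that 32-bit interconnect accesses are sufficient. A sketch of the complete hook and of the impl descriptor tying the Marvell quirks to the shared MMU-500 reset; flag and hook names are assumed from arm-smmu.h.

static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
{
        /* Mask out every AArch64 page-table format reported by SMMU_IDR2. */
        smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
                            ARM_SMMU_FEAT_FMT_AARCH64_16K |
                            ARM_SMMU_FEAT_FMT_AARCH64_64K);
        return 0;
}

static const struct arm_smmu_impl mrvl_mmu500_impl = {
        .read_reg64     = mrvl_mmu500_readq,
        .write_reg64    = mrvl_mmu500_writeq,
        .cfg_probe      = mrvl_mmu500_cfg_probe,
        .reset          = arm_mmu500_reset,
};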
196 const struct device_node *np = smmu->dev->of_node; in arm_smmu_impl_init()
199 * Set the impl for model-specific implementation quirks first, in arm_smmu_impl_init()
203 switch (smmu->model) { in arm_smmu_impl_init()
205 smmu->impl = &arm_mmu500_impl; in arm_smmu_impl_init()
213 /* This is implicitly MMU-400 */ in arm_smmu_impl_init()
214 if (of_property_read_bool(np, "calxeda,smmu-secure-config-access")) in arm_smmu_impl_init()
215 smmu->impl = &calxeda_impl; in arm_smmu_impl_init()
217 if (of_device_is_compatible(np, "nvidia,tegra234-smmu") || in arm_smmu_impl_init()
218 of_device_is_compatible(np, "nvidia,tegra194-smmu") || in arm_smmu_impl_init()
219 of_device_is_compatible(np, "nvidia,tegra186-smmu")) in arm_smmu_impl_init()
225 if (of_device_is_compatible(np, "marvell,ap806-smmu-500")) in arm_smmu_impl_init()
226 smmu->impl = &mrvl_mmu500_impl; in arm_smmu_impl_init()
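arm_smmu_impl_init() runs once during probe, after the model is known but before the hardware is configured, and whatever pointer it returns replaces the core's arm_smmu_device, which is why the Cavium path can hand back a resized structure. Everything it installs goes through the hooks in struct arm_smmu_impl, which the core accessors consult before falling back to the plain relaxed MMIO helpers, so a single assignment here transparently intercepts every register access or reset step. A sketch of just the hook subset exercised in this file; the real structure in arm-smmu.h has further members, and these prototypes are assumptions meant to illustrate the shape of the interface.

/* Sketch of the hooks used above (subset; prototypes assumed). */
struct arm_smmu_impl {
        u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
        void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
                          u32 val);
        u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
        void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
                            u64 val);
        int (*cfg_probe)(struct arm_smmu_device *smmu);
        int (*reset)(struct arm_smmu_device *smmu);
        int (*init_context)(struct arm_smmu_domain *smmu_domain,
                            struct io_pgtable_cfg *pgtbl_cfg,
                            struct device *dev);
};

Model-level quirks (MMU-500, Cavium) are applied first and integration-level quirks (Calxeda, NVIDIA Tegra, Marvell) afterwards, so a platform-specific impl can override or wrap the generic one.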