Lines matching "smmu-v3": excerpts from drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c (SVA support for the Arm SMMUv3)

// SPDX-License-Identifier: GPL-2.0
#include "arm-smmu-v3.h"
#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"
/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	struct arm_smmu_device *smmu;
	...
	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		...
		refcount_inc(&cd->refs);
		...
	}
	...
	smmu = smmu_domain->smmu;
	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new
	 * ASID, which isn't assigned yet; the invalidate-all on the old ASID
	 * below makes the overlap harmless.
	 */
	cd->asid = new_asid;
	...
	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);
	...
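This is the heart of ASID sharing: when the mm's CPU ASID is already in use by a private SMMU context, that context is moved to a freshly allocated ASID and the old entries are flushed. Below is a minimal, hedged sketch of the same allocate-move-invalidate pattern; the function and parameter names are illustrative, and only the xarray API calls are the real kernel ones.

#include <linux/xarray.h>
#include <linux/gfp.h>
#include <linux/err.h>

/* Illustrative only: move @cd to a fresh ASID so @old_asid can be shared. */
static int steal_asid_sketch(struct xarray *asid_xa, u32 asid_bits,
			     void *cd, u32 old_asid)
{
	u32 new_asid;
	int ret;

	/* Allocate any unused ASID in [1, (1 << asid_bits) - 1]. */
	ret = xa_alloc(asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return -ENOSPC;

	/*
	 * The private context now lives under new_asid. The caller must
	 * flush everything cached under old_asid (the driver uses
	 * arm_smmu_tlb_inv_asid()) before handing old_asid to the mm.
	 */
	xa_erase(asid_xa, old_asid);
	return 0;
}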
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	...
	if (!asid)		/* arm64_mm_context_get(mm) failed */
		err = -ESRCH;
	...
	if (!cd)		/* kzalloc() of the context descriptor failed */
		err = -ENOMEM;
	...
	refcount_set(&cd->refs, 1);
	...
	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      ...;
	...
	err = -EINVAL;		/* unsupported PAGE_SIZE */
	...
	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/* MAIR is effectively constant and global; read it from this CPU */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;
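The shared CD reuses the CPU's translation setup wholesale: TTBR from mm->pgd, MAIR from the current CPU's register, and T0SZ derived from the kernel's VA width. As a worked example, with 48-bit VAs (an assumed configuration) T0SZ = 64 - 48 = 16; the sketch below shows just that field, using the driver's CTXDESC_CD_0_TCR_T0SZ mask from arm-smmu-v3.h:

#include <linux/bitfield.h>

/* Illustrative: the T0SZ field for an assumed 48-bit VA space (64 - 48 = 16). */
static u64 example_cd_t0sz(void)
{
	return FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - 48);
}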
/* In arm_smmu_free_shared_cd(): unpin the ASID and drop the mm reference */
	arm64_mm_context_put(cd->mm);
	mmdrop(cd->mm);
/*
 * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h. An SMMU
 * without the range-invalidation feature would otherwise issue one TLBI
 * command per page into the command queue; past this threshold, which can
 * take long enough to trigger a soft lockup, the per-page commands are
 * replaced with a single address-space (per-ASID) TLBI command.
 */
#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
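With 4 KiB pages (PAGE_SHIFT == 12) this evaluates to 1 << 9 = 512 commands, so any invalidation covering 512 * 4 KiB = 2 MiB or more is better served by a single per-ASID TLBI. A sketch of how such a threshold is applied, standing in for the elided branch in the fragment below (size in bytes, with 0 meaning "whole address space"):

	if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
		size = 0;	/* fall back to one TLBI for the whole ASID */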
/* In arm_smmu_mm_arch_invalidate_secondary_tlbs(): */
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	...
	size = end - start;
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
		...
	}
	...
	/* Without BTM, CPU TLB maintenance isn't broadcast to the SMMU */
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
		if (!size)
			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
					      smmu_mn->cd->asid);
		else
			arm_smmu_tlb_inv_range_asid(start, size,
						    smmu_mn->cd->asid,
						    PAGE_SIZE, false,
						    smmu_domain);
	}
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	...
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	...
	if (smmu_mn->cleared) {
		...
		return;
	}
	/* DMA may still be running: keep the CD valid but disable translation */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	smmu_mn->cleared = true;
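The ordering here matters: the PASID is first pointed at the quiet CD so the device can no longer walk the dying mm's page tables, and only then are stale SMMU TLB and device ATC entries flushed. A hedged restatement of the sequence with the rationale spelled out (names as in the fragment above, locking omitted):

/* Illustrative sketch, not the literal upstream function. */
static void sva_quiesce_sketch(struct arm_smmu_domain *smmu_domain,
			       struct arm_smmu_mmu_notifier *smmu_mn,
			       struct mm_struct *mm)
{
	/* 1. Swap in a "quiet" CD: the PASID stays valid, translation stops. */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
	/* 2. Flush SMMU TLB entries tagged with the mm's ASID. */
	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	/* 3. Flush device ATC entries; (0, 0) covers the whole PASID. */
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
}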
/* In arm_smmu_mmu_notifier_get(): reuse an existing notifier for this mm */
	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}
	...
	ret = -ENOMEM;		/* kzalloc() of smmu_mn failed */
	...
	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	...
	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	...
	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	...
	/* Error path: this also frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
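The function follows a classic get-or-create pattern: one refcounted notifier per mm, published on the domain's list only after registration and the CD write succeed. A self-contained, hedged sketch of the same pattern; struct my_notifier and its ops are illustrative, while the mmu_notifier, list, and refcount APIs are the real ones.

#include <linux/mmu_notifier.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/err.h>

/* Illustrative wrapper; the fields mirror arm_smmu_mmu_notifier. */
struct my_notifier {
	struct mmu_notifier	mn;
	refcount_t		refs;
	struct list_head	list;
};

static const struct mmu_notifier_ops my_notifier_ops = {
	/* .arch_invalidate_secondary_tlbs, .release, etc. would go here */
};

static struct my_notifier *get_notifier_sketch(struct list_head *notifiers,
					       struct mm_struct *mm)
{
	struct my_notifier *n;
	int ret;

	/* Fast path: one notifier per mm, shared via a refcount. */
	list_for_each_entry(n, notifiers, list) {
		if (n->mn.mm == mm) {
			refcount_inc(&n->refs);
			return n;
		}
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return ERR_PTR(-ENOMEM);

	refcount_set(&n->refs, 1);
	n->mn.ops = &my_notifier_ops;
	ret = mmu_notifier_register(&n->mn, mm);
	if (ret) {
		kfree(n);
		return ERR_PTR(ret);
	}

	/* Publish only after registration succeeded. */
	list_add(&n->list, notifiers);
	return n;
}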
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/* Skip the flush if arm_smmu_mm_release() already did it */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	}
	...
	mmu_notifier_put(&smmu_mn->mn);	/* frees smmu_mn */
static struct iommu_sva *__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	...
	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;
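Drivers do not call __arm_smmu_sva_bind() directly; they reach it through the generic IOMMU SVA API. A hedged usage sketch, assuming the two-argument iommu_sva_bind_device() of this kernel generation and a device that already has the SVA feature enabled; the handle must be kept for the matching unbind:

#include <linux/iommu.h>
#include <linux/sched.h>

/* Illustrative: bind the calling process's address space to @dev. */
static int example_bind(struct device *dev, struct iommu_sva **handle,
			u32 *pasid)
{
	struct iommu_sva *h = iommu_sva_bind_device(dev, current->mm);

	if (IS_ERR(h))
		return PTR_ERR(h);

	*handle = h;
	*pasid = iommu_sva_get_pasid(h);  /* program this into DMA descriptors */
	return 0;
}

/* Later, on teardown: iommu_sva_unbind_device(*handle); */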
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	...
	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;
	...
	/* The SMMU's output address size must cover the CPU's */
	if (smmu->oas < oas)
		return false;
	...
	/* We can support bigger ASIDs than the CPU, but not smaller */
	if (smmu->asid_bits < asid_bits)
		return false;
	...
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);
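The debug message gives the rough upper bound on bindable address spaces. A worked example with assumed numbers:

/*
 * Assumed configuration: 16 ASID bits, with KPTI enabled
 * (arm64_kernel_unmapped_at_el0() is true), so asid_bits drops to 15.
 * On an 8-CPU system the message then reports
 * (1 << 15) - 8 - 2 = 32758 shared contexts, mirroring the
 * max_pinned_asids accounting in arch/arm64/mm/context.c.
 */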
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}
bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}
/* In arm_smmu_master_sva_enabled(), under sva_lock: */
	enabled = master->sva_enabled;
/* In arm_smmu_master_sva_enable_iopf(): */
	struct device *dev = master->dev;
	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	...
	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	...
	/* Error path: undo the queue registration */
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
/* In arm_smmu_master_sva_disable_iopf(): */
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;
	...
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
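Both helpers sit behind the generic IOMMU feature toggles, and the -EINVAL check above encodes an ordering rule: a stall-capable device must have IOPF enabled before SVA. A hedged sketch of the order a device driver would typically follow (error handling trimmed):

#include <linux/iommu.h>

/* Illustrative: enable fault handling first, then SVA, on one device. */
static int example_enable_sva_path(struct device *dev)
{
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	return ret;
}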
/* In arm_smmu_master_enable_sva(), under sva_lock: */
	master->sva_enabled = true;
/* In arm_smmu_master_disable_sva(), under sva_lock: */
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		...
		return -EBUSY;
	}
	...
	master->sva_enabled = false;
/* In arm_smmu_sva_remove_dev_pasid(): */
	struct mm_struct *mm = domain->mm;
	...
	list_for_each_entry(t, &master->bonds, list) {
		if (t->mm == mm) {
			bond = t;
			break;
		}
	}

	/* Drop the bond on the last unbind of this {dev, mm} pair */
	if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
/* In arm_smmu_sva_set_dev_pasid(): */
	struct mm_struct *mm = domain->mm;
/* In arm_smmu_sva_domain_alloc(): */
	domain->ops = &arm_smmu_sva_domain_ops;