Lines Matching full:cd
Member declaration:
  18  struct arm_smmu_ctx_desc *cd;  (member)
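This member is the per-mm notifier's pointer to the shared context descriptor. A sketch of the layouts these matches imply, reconstructed from the field accesses visible below (cd->asid, cd->ttbr, cd->tcr, cd->mair, cd->refs, cd->mm, smmu_mn->cd); field order and any member not accessed in this listing are assumptions:

/* Shared context descriptor, one per bound mm (assumed layout) */
struct arm_smmu_ctx_desc {
	u16			asid;	/* IOMMU ASID, kept in sync with the CPU ASID */
	u64			ttbr;	/* physical address of the shared CPU page table */
	u64			tcr;
	u64			mair;
	refcount_t		refs;	/* one reference per bond */
	struct mm_struct	*mm;	/* NULL for private (non-SVA) descriptors */
};

/* Per-mm notifier holding the matched "cd" member (assumed layout) */
struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;	/* line 18 */
	refcount_t			refs;
	struct arm_smmu_domain		*domain;
};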
In arm_smmu_share_asid():
  49  struct arm_smmu_ctx_desc *cd;  (local)
  53  cd = xa_load(&arm_smmu_asid_xa, asid);
  54  if (!cd)
  57  if (cd->mm) {
  58  if (WARN_ON(cd->mm != mm))
  60  /* All devices bound to this mm use the same cd struct. */
  61  refcount_inc(&cd->refs);
  62  return cd;
  65  smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
  68  ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
  77  cd->asid = new_asid;
  79  * Update ASID and invalidate CD in all associated masters. There will
  83  arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
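Read together, these matches trace the ASID "sharing" decision: the CPU ASID either belongs to nobody (take it), to this mm already (reuse it), or to a private SMMU domain (steal it by moving that domain to a new ASID). A condensed sketch of how the fragments plausibly connect; the declarations, the xa_alloc() limit, the error values and the closing invalidate/erase of the old ASID are assumptions about the lines elided from this listing:

static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);	/* current owner, if any */
	if (!cd)
		return NULL;			/* ASID is free for the caller */

	if (cd->mm) {
		/* Already a shared (SVA) context */
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	/* A private domain owns the ASID: migrate it to a fresh one */
	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);

	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until the TLB is
	 * invalidated below.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);

	/* Flush entries tagged with the old ASID, then hand it to the caller */
	arm_smmu_tlb_inv_asid(smmu, asid);
	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}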
In arm_smmu_alloc_shared_cd():
  97  struct arm_smmu_ctx_desc *cd;  (local)
 109  cd = kzalloc(sizeof(*cd), GFP_KERNEL);
 110  if (!cd) {
 115  refcount_set(&cd->refs, 1);
 124  err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
 156  cd->ttbr = virt_to_phys(mm->pgd);
 157  cd->tcr = tcr;
 162  cd->mair = read_sysreg(mair_el1);
 163  cd->asid = asid;
 164  cd->mm = mm;
 166  return cd;
 169  arm_smmu_free_asid(cd);
 171  kfree(cd);
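These matches outline the shared-cd constructor: allocate the descriptor, claim the CPU's ASID via arm_smmu_share_asid(), publish it in arm_smmu_asid_xa, then mirror the CPU's translation state (TTBR from mm->pgd, MAIR from MAIR_EL1) into the cd. A sketch; the mm/ASID pinning, the TCR derivation and the unwind labels are assumptions about the elided lines:

static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	mmgrab(mm);				/* don't free the mm until the ASID is released */

	asid = arm64_mm_context_get(mm);	/* pin the CPU ASID */
	if (!asid) {
		err = -ESRCH;
		goto out_drop_mm;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	ret = arm_smmu_share_asid(mm, asid);	/* NULL, an existing cd, or an error */
	if (ret)
		goto out_free_cd;

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	if (err)
		goto out_free_asid;

	u64 tcr = 0;	/* really: CD TCR fields derived from the CPU's TCR_EL1 (elided) */
	cd->ttbr = virt_to_phys(mm->pgd);	/* the SMMU walks the CPU page table */
	cd->tcr = tcr;
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
out_drop_mm:
	mmdrop(mm);
	return err ? ERR_PTR(err) : ret;
}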
In arm_smmu_free_shared_cd():
 179  static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)  (argument)
 181  if (arm_smmu_free_asid(cd)) {
 183  arm64_mm_context_put(cd->mm);
 184  mmdrop(cd->mm);
 185  kfree(cd);
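The matches cover nearly the whole function; stitched together (the bracing and the behaviour attributed to arm_smmu_free_asid() are the assumptions):

static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	/* assumed: arm_smmu_free_asid() returns true once the last ref is gone */
	if (arm_smmu_free_asid(cd)) {
		/* unpin the CPU ASID and release the mm grabbed at alloc time */
		arm64_mm_context_put(cd->mm);
		mmdrop(cd->mm);
		kfree(cd);
	}
}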
In arm_smmu_mm_arch_invalidate_secondary_tlbs():
 224  smmu_mn->cd->asid);
 227  smmu_mn->cd->asid,
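Both matches are arguments to TLB-invalidation helpers, one for a full-ASID flush and one for a range flush. A sketch of the assumed call sites; the container_of() recovery of smmu_mn, the size handling and the trailing ATC flush are assumptions:

static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn =
		container_of(mn, struct arm_smmu_mmu_notifier, mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size = end - start;	/* assumed: 0 means "flush the whole ASID" */

	if (!size)
		arm_smmu_tlb_inv_asid(smmu_domain->smmu,
				      smmu_mn->cd->asid);		/* line 224 */
	else
		arm_smmu_tlb_inv_range_asid(start, size,
					    smmu_mn->cd->asid,		/* line 227 */
					    PAGE_SIZE, false, smmu_domain);

	/* assumed: device ATCs are flushed as well */
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}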
In arm_smmu_mm_release():
 247  * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
 252  arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
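The comment at 247 explains the ordering: the CD must stay valid while in-flight DMA drains, so translation is disabled before the TLB flush at 252. A sketch under that assumption; quiet_cd (a global descriptor with translation disabled) and the ATC flush are not in the listing:

static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn =
		container_of(mn, struct arm_smmu_mmu_notifier, mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation: point the PASID at the "quiet" descriptor
	 * (quiet_cd is an assumption about the elided code).
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	/* then flush whatever the exiting mm still has cached */
	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
}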
In arm_smmu_mmu_notifier_get():
 276  struct arm_smmu_ctx_desc *cd;  (local)
 286  cd = arm_smmu_alloc_shared_cd(mm);
 287  if (IS_ERR(cd))
 288  return ERR_CAST(cd);
 297  smmu_mn->cd = cd;
 307  ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
 318  arm_smmu_free_shared_cd(cd);
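These matches outline the bind path: allocate (or reuse) the shared cd, wrap it in a notifier, and write it into the context-descriptor table at the mm's PASID, unwinding through arm_smmu_free_shared_cd() on failure. A sketch with assumed error labels, ops table and registration calls:

static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	/* assumed: an existing notifier for this mm is found and reused here */

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;	/* assumed ops table */

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	/* install the shared cd at this mm's PASID on all attached masters */
	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	return smmu_mn;

err_put_notifier:
	mmu_notifier_put(&smmu_mn->mn);	/* frees smmu_mn via RCU */
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}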
In arm_smmu_mmu_notifier_put():
 325  struct arm_smmu_ctx_desc *cd = smmu_mn->cd;  (local)
 339  arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
 345  arm_smmu_free_shared_cd(cd);
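The unbind path mirrors the bind path: on the last put, quiesce the PASID, flush the ASID (line 339), and release the cd (line 345). A sketch; the refcount test, the NULL ctx-desc write and the ATC flush are assumptions about the elided lines:

static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	/* assumed: drop one per-bond reference; only the last put tears down */
	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	/* detach the PASID, then flush whatever the dying context left behind */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
	arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	mmu_notifier_put(&smmu_mn->mn);	/* frees smmu_mn via RCU */
	arm_smmu_free_shared_cd(cd);
}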