// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"

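/*
 * An arm_smmu_mmu_notifier tracks the shared context descriptor for one
 * {domain, mm} pair. It is refcounted, since several devices in the same
 * domain may bind the same mm, and lives on the domain's mmu_notifiers
 * list. @cleared is set once the release() callback has invalidated the
 * context, so teardown can skip a second invalidation.
 */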
struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

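/*
 * An arm_smmu_bond represents one {device, mm} binding. Bonds are kept on
 * the master's list so that repeated bind() calls for the same pair just
 * take a reference, and each bond holds a reference on the per-{domain, mm}
 * notifier above.
 */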
struct arm_smmu_bond {
	struct iommu_sva		sva;
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
	refcount_t			refs;
};

#define sva_to_bond(handle) \
	container_of(handle, struct arm_smmu_bond, sva)

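/*
 * Protects the bond lists, each master's sva_enabled flag and the
 * notifiers' cleared flag.
 */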
static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new ASID,
	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
	 * later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}

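/*
 * Build a context descriptor that mirrors the CPU's stage-1 configuration
 * for @mm: pin the arm64 ASID, reclaim it from any private SMMU context
 * that happens to be using it, and derive TCR/TTBR/MAIR from the CPU's own
 * translation regime.
 */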
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	/* Don't free the mm until we release the ASID */
	mmgrab(mm);

	asid = arm64_mm_context_get(mm);
	if (!asid) {
		err = -ESRCH;
		goto out_drop_mm;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

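	/*
	 * Mirror the CPU's stage-1 translation regime: input address size
	 * from vabits_actual, write-back write-allocate cacheable, inner
	 * shareable walks, TTBR1 disabled (EPD1), AArch64 descriptor format.
	 */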
	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
out_drop_mm:
	mmdrop(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}


static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		mmdrop(cd->mm);
		kfree(cd);
	}
}

/*
 * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h. On an SMMU
 * without the range invalidation feature, issuing this many per-page TLBI
 * commands in the command queue can cause a soft lockup, so invalidations at
 * or above this threshold are replaced with a single address-space (ASID)
 * invalidation.
 */
#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
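/*
 * For example, with 4K pages (PAGE_SHIFT == 12) the threshold is 1 << 9 ==
 * 512 commands, so any invalidation covering 2MB or more becomes a full
 * ASID invalidation.
 */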

static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * mm_types defines vm_end as the first byte after the range, while the
	 * IOMMU subsystem uses the last byte of an address range. Translate by
	 * computing the size here; a size of zero means "whole address space".
	 */
	size = end - start;
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
			size = 0;
	} else {
		if (size == ULONG_MAX)
			size = 0;
	}

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
		if (!size)
			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
					      smmu_mn->cd->asid);
		else
			arm_smmu_tlb_inv_range_asid(start, size,
						    smmu_mn->cd->asid,
						    PAGE_SIZE, false,
						    smmu_domain);
	}

	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}

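/*
 * mmu_notifier release(): the mm is going away while DMA may still be
 * running. Point the PASID at quiet_cd, which keeps the context descriptor
 * valid (no C_BAD_CD events) but disables translation, then invalidate the
 * TLB and ATC for the context.
 */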
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.arch_invalidate_secondary_tlbs	= arm_smmu_mm_arch_invalidate_secondary_tlbs,
	.release			= arm_smmu_mm_release,
	.free_notifier			= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

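/*
 * Drop a reference on the notifier. On the last put, detach the shared CD
 * from the PASID and, unless release() already ran, invalidate the TLB and
 * ATC. The notifier itself is freed through free_notifier() once
 * mmu_notifier_put() has waited out concurrent readers.
 */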
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/*
	 * If we went through clear(), we've already invalidated, and no
	 * new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}

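/*
 * Bind @mm to @dev on the device's current domain. Reuses an existing bond
 * for the pair if there is one; otherwise allocates a bond and takes a
 * reference on the {domain, mm} notifier. Caller holds sva_lock.
 */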
static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it. */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;

err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}

bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	int ret;
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret) {
		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
		return ret;
	}
	return 0;
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}

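/*
 * Undo one arm_smmu_sva_set_dev_pasid() call: find the bond for this
 * {dev, mm} pair, drop its reference and, if it was the last one, release
 * the notifier and free the bond.
 */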
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t id)
{
	struct mm_struct *mm = domain->mm;
	struct arm_smmu_bond *bond = NULL, *t;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	mutex_lock(&sva_lock);
	list_for_each_entry(t, &master->bonds, list) {
		if (t->mm == mm) {
			bond = t;
			break;
		}
	}

	if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

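/*
 * iommu_domain_ops entry point: attach the SVA domain's mm to @dev for
 * PASID @id. The PASID itself is taken from mm->pasid by the bind path
 * above, so @id is not used directly here.
 */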
static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t id)
{
	int ret = 0;
	struct iommu_sva *handle;
	struct mm_struct *mm = domain->mm;

	mutex_lock(&sva_lock);
	handle = __arm_smmu_sva_bind(dev, mm);
	if (IS_ERR(handle))
		ret = PTR_ERR(handle);
	mutex_unlock(&sva_lock);

	return ret;
}

static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
	kfree(domain);
}

static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
	.set_dev_pasid		= arm_smmu_sva_set_dev_pasid,
	.free			= arm_smmu_sva_domain_free
};

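/*
 * An SVA domain has no page tables of its own; it carries only the ops and,
 * once the IOMMU core has set domain->mm, the mm it shares with the CPU.
 * The shared context descriptor is installed per-{master, PASID} at attach
 * time.
 */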
struct iommu_domain *arm_smmu_sva_domain_alloc(void)
{
	struct iommu_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;
	domain->ops = &arm_smmu_sva_domain_ops;

	return domain;
}