// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"

struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
	struct iommu_sva		sva;
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
	refcount_t			refs;
};

#define sva_to_bond(handle) \
	container_of(handle, struct arm_smmu_bond, sva)

static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new ASID,
	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
	 * later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}
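/*
 * Illustrative sketch (not part of the driver): how a caller is expected to
 * interpret the three possible outcomes of arm_smmu_share_asid(). The helper
 * name example_claim_asid() is hypothetical; arm_smmu_alloc_shared_cd() below
 * is the real caller.
 *
 *	static int example_claim_asid(struct mm_struct *mm, u16 asid)
 *	{
 *		struct arm_smmu_ctx_desc *old;
 *
 *		old = arm_smmu_share_asid(mm, asid);
 *		if (IS_ERR(old))
 *			return PTR_ERR(old);	// unresolvable conflict
 *		if (old)
 *			return 1;	// mm already has a shared CD; reuse it
 *		return 0;		// ASID is now free for this mm
 *	}
 */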
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	/* Don't free the mm until we release the ASID */
	mmgrab(mm);

	asid = arm64_mm_context_get(mm);
	if (!asid) {
		err = -ESRCH;
		goto out_drop_mm;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
out_drop_mm:
	mmdrop(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		mmdrop(cd->mm);
		kfree(cd);
	}
}
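/*
 * Worked example (illustrative, assuming a kernel configured with 48-bit VAs
 * and 4 KiB pages, on a CPU reporting a 44-bit PA range, i.e. PARANGE 0b0100):
 *
 *	T0SZ = 64 - vabits_actual = 64 - 48 = 16
 *	TG0  = ARM_LPAE_TCR_TG0_4K
 *	IPS  = 0b0100 (44-bit intermediate physical address size)
 *	EPD1 = 1 (TTBR1 walks disabled; the shared CD only covers user VAs)
 *
 * The resulting TCR mirrors the CPU's TTBR0 configuration in TCR_EL1, which
 * is what lets the SMMU walk the CPU page tables pointed to by mm->pgd.
 */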
static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * mm_types defines vm_end as the first byte after the range, whereas
	 * the IOMMU subsystem uses the last address of a range. Translate
	 * between the two conventions by computing the size here.
	 */
	size = end - start;
	if (size == ULONG_MAX)
		size = 0;

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
		if (!size)
			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
					      smmu_mn->cd->asid);
		else
			arm_smmu_tlb_inv_range_asid(start, size,
						    smmu_mn->cd->asid,
						    PAGE_SIZE, false,
						    smmu_domain);
	}

	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}

static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.arch_invalidate_secondary_tlbs	= arm_smmu_mm_arch_invalidate_secondary_tlbs,
	.release			= arm_smmu_mm_release,
	.free_notifier			= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}
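/*
 * Lifetime sketch (illustrative, assuming a single bind/unbind cycle): the
 * notifier is freed through the mmu_notifier SRCU machinery rather than
 * synchronously, which is why arm_smmu_sva_notifier_synchronize() must be
 * called before the driver can be unloaded.
 *
 *	smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm); // refs == 1
 *	...				// DMA runs against mm's ASID
 *	arm_smmu_mmu_notifier_put(smmu_mn);
 *					// refs == 0: clears the CD entry,
 *					// invalidates TLB/ATC if needed, then
 *					// frees smmu_mn via free_notifier()
 */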
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/*
	 * If we went through clear(), we've already invalidated, and no
	 * new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}

static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it. */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;

err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}
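/*
 * Refcounting sketch (illustrative): two binds of the same {dev, mm} pair
 * share one bond and one notifier, so two releases are needed before the
 * shared CD is torn down.
 *
 *	h1 = __arm_smmu_sva_bind(dev, mm);	// allocates bond, refs == 1
 *	h2 = __arm_smmu_sva_bind(dev, mm);	// h2 == h1, refs == 2
 *
 * Two matching arm_smmu_sva_remove_dev_pasid() calls then drop the bond;
 * only the last one frees it and puts the notifier.
 */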
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	int ret;
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret) {
		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
		return ret;
	}
	return 0;
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}
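/*
 * Usage sketch (illustrative): the expected enable order from a device
 * driver's perspective, via the generic IOMMU feature API. IOPF must be
 * enabled before SVA on masters that rely on stall, matching the
 * master->iopf_enabled check in arm_smmu_master_sva_enable_iopf().
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF))
 *		return -ENODEV;
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *		return -ENODEV;
 *	}
 */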
void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}

void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t id)
{
	struct mm_struct *mm = domain->mm;
	struct arm_smmu_bond *bond = NULL, *t;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	mutex_lock(&sva_lock);
	list_for_each_entry(t, &master->bonds, list) {
		if (t->mm == mm) {
			bond = t;
			break;
		}
	}

	if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t id)
{
	int ret = 0;
	struct iommu_sva *handle;
	struct mm_struct *mm = domain->mm;

	mutex_lock(&sva_lock);
	handle = __arm_smmu_sva_bind(dev, mm);
	if (IS_ERR(handle))
		ret = PTR_ERR(handle);
	mutex_unlock(&sva_lock);

	return ret;
}

static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
	kfree(domain);
}

static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
	.set_dev_pasid		= arm_smmu_sva_set_dev_pasid,
	.free			= arm_smmu_sva_domain_free
};

struct iommu_domain *arm_smmu_sva_domain_alloc(void)
{
	struct iommu_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;
	domain->ops = &arm_smmu_sva_domain_ops;

	return domain;
}
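/*
 * End-to-end sketch (illustrative): how the ops above are reached when a
 * driver binds a process address space through the core SVA API.
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *		// core allocates an SVA domain (arm_smmu_sva_domain_alloc)
 *		// and attaches it to a PASID (arm_smmu_sva_set_dev_pasid)
 *	...
 *	iommu_sva_unbind_device(handle);
 *		// reaches arm_smmu_sva_remove_dev_pasid() through the core
 *		// PASID teardown path
 */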