// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
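/*
 * Reset value for a Stream-to-Context Register: fault the transaction if
 * bypass is disabled, otherwise let it through untranslated. This is the
 * state restored when a stream mapping entry is released, and the state
 * initially programmed for every entry at device reset.
 */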
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

static bool using_legacy_binding, using_generic_binding;

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put_autosuspend(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static int arm_smmu_bus_init(struct iommu_ops *ops);

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

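/*
 * Illustrative fragment of the legacy binding parsed below (placeholder
 * values, not taken from any real platform): the SMMU node carries a
 * "mmu-masters" list of <phandle, stream IDs...> tuples and each master
 * node declares "#stream-id-cells", e.g.
 *
 *	smmu: iommu@... {
 *		compatible = "arm,smmu-v2";
 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
 *	};
 *
 *	dma0: dma-controller@... {
 *		#stream-id-cells = <2>;
 *	};
 */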
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

/*
 * With the legacy DT binding in play, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no probe_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		return arm_smmu_bus_init(&arm_smmu_ops);
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);
#else
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	return -ENODEV;
}
#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(smmu_domain);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVA);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S1_TLBIVAL);
}

static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{
	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
				  ARM_SMMU_CB_S2_TLBIIPAS2L);
}

static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
					size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2_v1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;
	int ret;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	ret = report_iommu_fault(domain, NULL, iova,
		fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);

	if (ret == -ENOSYS)
		dev_err_ratelimited(smmu->dev,
		"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	if (__ratelimit(&rs)) {
		if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
		    (gfsr & ARM_SMMU_sGFSR_USF))
			dev_err(smmu->dev,
				"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
				(u16)gfsynr1);
		else
			dev_err(smmu->dev,
				"Unexpected global fault, this could be serious\n");
		dev_err(smmu->dev,
			"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			gfsr, gfsynr0, gfsynr1, gfsynr2);
	}

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
			cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= ARM_SMMU_TCR2_AS;
			else
				cb->tcr[0] |= ARM_SMMU_TCR_EAE;
		}
	} else {
		cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
			cb->ttbr[1] = 0;
		} else {
			cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
						 cfg->asid);
			cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
						 cfg->asid);

			if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
				cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
			else
				cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
		}
	}
}

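/*
 * Push the cached state for context bank @idx (TCR, TTBRs, MAIRs, SCTLR)
 * out to the hardware. A bank with no configuration attached is simply
 * left disabled by clearing SCTLR.
 */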
void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = ARM_SMMU_CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
				  ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
		       FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
				  ARM_SMMU_CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
	if (stage1)
		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= ARM_SMMU_SCTLR_E;

	if (smmu->impl && smmu->impl->write_sctlr)
		smmu->impl->write_sctlr(smmu, idx, reg);
	else
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct arm_smmu_device *smmu,
				       struct device *dev, unsigned int start)
{
	if (smmu->impl && smmu->impl->alloc_context_bank)
		return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
}

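/*
 * Finalise a domain on first attach: pick the translation stage and context
 * format, claim a context bank, build the io-pgtable configuration, program
 * the bank and hook up its context fault interrupt.
 */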
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu,
					struct device *dev)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	irqreturn_t (*context_fault)(int irq, void *dev);

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
	if (ret < 0) {
		goto out_unlock;
	}

	smmu_domain->smmu = smmu;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
	};

	if (!iommu_get_dma_strict(domain))
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
		if (ret)
			goto out_clear_smmu;
	}

	if (smmu_domain->pgtbl_quirks)
		pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
		domain->geometry.aperture_start = ~0UL << ias;
		domain->geometry.aperture_end = ~0UL;
	} else {
		domain->geometry.aperture_end = (1UL << ias) - 1;
	}

	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];

	if (smmu->impl && smmu->impl->context_fault)
		context_fault = smmu->impl->context_fault;
	else
		context_fault = arm_smmu_context_fault;

	ret = devm_request_irq(smmu->dev, irq, context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
		  FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= ARM_SMMU_SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg;

	if (smmu->impl && smmu->impl->write_s2cr) {
		smmu->impl->write_s2cr(smmu, idx);
		return;
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= ARM_SMMU_S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;
	int i;

	if (!smmu->smrs)
		return;
	/*
	 * If we've had to accommodate firmware memory regions, we may
	 * have live SMRs by now; tread carefully...
	 *
	 * Somewhat perversely, not having a free SMR for this test implies we
	 * can get away without it anyway, as we'll only be able to 'allocate'
	 * these SMRs for the ID/mask values we're already trusting to be OK.
	 */
	for (i = 0; i < smmu->num_mapping_groups; i++)
		if (!smmu->smrs[i].valid)
			goto smr_ok;
	return;
smr_ok:
	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
	smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);

	smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
	smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
}

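/*
 * Example of the matching rules below (illustrative values): with an
 * existing SMR of {id 0x400, mask 0xff} covering IDs 0x400-0x4ff, a new
 * entry {id 0x4c0, mask 0x3f} is entirely contained and reuses that index,
 * whereas {id 0x470, mask 0xf00} also matches IDs outside 0x400-0x4ff and
 * is rejected as a potential conflict.
 */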
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

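/*
 * Reserve stream mapping entries for every ID in the master's fwspec under
 * stream_map_mutex, and only touch the hardware once the whole allocation
 * has succeeded, so a partial failure leaves the SMMU registers untouched.
 */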
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(cfg, fwspec, i, idx) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(cfg, fwspec, i, idx)
		arm_smmu_write_sme(smmu, idx);

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = cfg->smmu;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;
	int ret;

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and probe_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	cfg = dev_iommu_priv_get(dev);
	if (!cfg)
		return -ENODEV;

	smmu = cfg->smmu;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu, dev);
	if (ret < 0)
		goto rpm_put;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto rpm_put;
	}

	/* Looks ok, so add the device to the domain */
	ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);

	/*
	 * Setup an autosuspend delay to avoid bouncing runpm state.
	 * Otherwise, if a driver for a suspended consumer device
	 * unmaps buffers, it will runpm resume/suspend for each one.
	 *
	 * For example, when used by a GPU device, when an application
	 * or game exits, it can trigger unmapping 100s or 1000s of
	 * buffers. With a runpm cycle for each buffer, that adds up
	 * to 5-10sec worth of reprogramming the context bank, while
	 * the system appears to be locked up to the user.
	 */
	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
	pm_runtime_use_autosuspend(smmu->dev);

rpm_put:
	arm_smmu_rpm_put(smmu);
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	int ret;

	if (!ops)
		return -ENODEV;

	arm_smmu_rpm_get(smmu);
	ret = ops->map(ops, iova, paddr, size, prot, gfp);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	size_t ret;

	if (!ops)
		return 0;

	arm_smmu_rpm_get(smmu);
	ret = ops->unmap(ops, iova, size, gather);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (!smmu)
		return;

	arm_smmu_rpm_get(smmu);
	if (smmu->version == ARM_SMMU_V2 ||
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		arm_smmu_tlb_sync_context(smmu_domain);
	else
		arm_smmu_tlb_sync_global(smmu);
	arm_smmu_rpm_put(smmu);
}

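/*
 * Resolve an IOVA using the hardware ATS1PR address translation operation,
 * serialised against TLB syncs by cb_lock. If the operation times out we
 * fall back to walking the page tables in software.
 */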
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *reg;
	u32 tmp;
	u64 phys;
	unsigned long va, flags;
	int ret, idx = cfg->cbndx;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return 0;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	va = iova & ~0xfffUL;
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
	else
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);

	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
				      5, 50)) {
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
	if (phys & ARM_SMMU_CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	arm_smmu_rpm_put(smmu);

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		return arm_smmu_iova_to_phys_hard(domain, iova);

	return ops->iova_to_phys(ops, iova);
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
							  fwnode);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
	struct arm_smmu_device *smmu = NULL;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);

		/*
		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
		 * will allocate/initialise a new one. Thus we need to update fwspec for
		 * later use.
		 */
		fwspec = dev_iommu_fwspec_get(dev);
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return ERR_PTR(-ENODEV);
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	dev_iommu_priv_set(dev, cfg);
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		goto out_cfg_free;

	ret = arm_smmu_master_alloc_smes(dev);
	arm_smmu_rpm_put(smmu);

	if (ret)
		goto out_cfg_free;

	device_link_add(dev, smmu->dev,
			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);

	return &smmu->iommu;

out_cfg_free:
	kfree(cfg);
out_free:
	iommu_fwspec_free(dev);
	return ERR_PTR(ret);
}

static void arm_smmu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;
	int ret;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	cfg = dev_iommu_priv_get(dev);
	smmu = cfg->smmu;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	arm_smmu_master_free_smes(cfg, fwspec);

	arm_smmu_rpm_put(smmu);

	dev_iommu_priv_set(dev, NULL);
	kfree(cfg);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu = cfg->smmu;
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(cfg, fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else if (dev_is_fsl_mc(dev))
		group = fsl_mc_device_group(dev);
	else
		group = generic_device_group(dev);

	/* Remember group for faster lookups */
	if (!IS_ERR(group))
		for_each_cfg_sme(cfg, fwspec, i, idx)
			smmu->s2crs[idx].group = group;

	return group;
}

static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int ret = 0;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		ret = -EPERM;
	else
		smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
	mutex_unlock(&smmu_domain->init_mutex);

	return ret;
}

static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	int ret = 0;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		ret = -EPERM;
	else
		smmu_domain->pgtbl_quirks = quirks;
	mutex_unlock(&smmu_domain->init_mutex);

	return ret;
}

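/*
 * Illustrative form of the generic binding consumed here (placeholder
 * values): one cell per "iommus" entry gives just a Stream ID, an optional
 * second cell gives an SMR mask, and "stream-match-mask" on the SMMU node
 * can supply a default mask, e.g.
 *
 *	master {
 *		iommus = <&smmu 0x400>, <&smmu 0x800 0x7f>;
 *	};
 */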
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 mask, fwid = 0;

	if (args->args_count > 0)
		fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);

	if (args->args_count > 1)
		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
		fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static int arm_smmu_def_domain_type(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	const struct arm_smmu_impl *impl = cfg->smmu->impl;

	if (impl && impl->def_domain_type)
		return impl->def_domain_type(dev);

	return 0;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.probe_device		= arm_smmu_probe_device,
	.release_device		= arm_smmu_release_device,
	.device_group		= arm_smmu_device_group,
	.enable_nesting		= arm_smmu_enable_nesting,
	.set_pgtable_quirks	= arm_smmu_set_pgtable_quirks,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= generic_iommu_put_resv_regions,
	.def_domain_type	= arm_smmu_def_domain_type,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
	.owner			= THIS_MODULE,
};

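/*
 * Bring the SMMU to a known state: clear any recorded global faults, reset
 * every stream mapping entry and context bank, invalidate the TLBs, then
 * program the global configuration and enable the device via sCR0.
 */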
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int i;
	u32 reg;

	/* clear global FSR */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		arm_smmu_write_context_bank(smmu, i);
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
	}

	/* Invalidate the TLB, just in case */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);

	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
		ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~ARM_SMMU_sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= ARM_SMMU_sCR0_USFCFG;
	else
		reg &= ~ARM_SMMU_sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~ARM_SMMU_sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(ARM_SMMU_sCR0_BSU);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= ARM_SMMU_sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= ARM_SMMU_sCR0_EXIDENABLE;

	if (smmu->impl && smmu->impl->reset)
		smmu->impl->reset(smmu);

	/* Push the button */
	arm_smmu_tlb_sync_global(smmu);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

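/*
 * Probe the ID registers (ID0-ID2) to discover which translation stages,
 * context formats, stream mapping resources and address sizes this
 * particular SMMU implementation provides.
 */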
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int size;
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i, ret;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);

	if (id & ARM_SMMU_ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ARM_SMMU_ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ARM_SMMU_ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ARM_SMMU_ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
	}
	smmu->streamid_mask = size - 1;
	if (id & ARM_SMMU_ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups", size);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 ||
	    !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
	if (smmu->numpage != 2 * size << smmu->pgshift)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
			2 * size << smmu->pgshift, smmu->numpage);
	/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
	smmu->numpage = size;

	smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
	smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
				 sizeof(*smmu->cbs), GFP_KERNEL);
	if (!smmu->cbs)
		return -ENOMEM;

	/* ID2 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
	smmu->pa_size = size;

	if (id & ARM_SMMU_ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ARM_SMMU_ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ARM_SMMU_ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ARM_SMMU_ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	if (smmu->impl && smmu->impl->cfg_probe) {
		ret = smmu->impl->cfg_probe(smmu);
		if (ret)
			return ret;
	}

	if (smmu->impl && smmu->impl->cfg_probe) {
		ret = smmu->impl->cfg_probe(smmu);
		if (ret)
			return ret;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static const struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU401:
		smmu->version = ARM_SMMU_V1_64K;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
		smmu->version = ARM_SMMU_V2;
		smmu->model = CAVIUM_SMMUV2;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

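/*
 * Translate the IORT-provided SMMU node into driver configuration: the
 * model field selects version/implementation via acpi_smmu_get_data(),
 * and the COHERENT_WALK flag plays the role of "dma-coherent" in DT.
 */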
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding) {
			pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
				  IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
		}
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

static int arm_smmu_bus_init(struct iommu_ops *ops)
{
	int err;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type)) {
		err = bus_set_iommu(&platform_bus_type, ops);
		if (err)
			return err;
	}
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype)) {
		err = bus_set_iommu(&amba_bustype, ops);
		if (err)
			goto err_reset_platform_ops;
	}
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		err = bus_set_iommu(&pci_bus_type, ops);
		if (err)
			goto err_reset_amba_ops;
	}
#endif
#ifdef CONFIG_FSL_MC_BUS
	if (!iommu_present(&fsl_mc_bus_type)) {
		err = bus_set_iommu(&fsl_mc_bus_type, ops);
		if (err)
			goto err_reset_pci_ops;
	}
#endif
	return 0;

err_reset_pci_ops: __maybe_unused;
#ifdef CONFIG_PCI
	bus_set_iommu(&pci_bus_type, NULL);
#endif
err_reset_amba_ops: __maybe_unused;
#ifdef CONFIG_ARM_AMBA
	bus_set_iommu(&amba_bustype, NULL);
#endif
err_reset_platform_ops: __maybe_unused;
	bus_set_iommu(&platform_bus_type, NULL);
	return err;
}

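/*
 * Roughly, probe proceeds as follows: parse the firmware description
 * (DT or IORT), map the global register space, size streams and context
 * banks from the ID registers, wire up clocks and interrupts, and
 * finally reset the hardware and register with the IOMMU core.
 */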
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;
	irqreturn_t (*global_fault)(int irq, void *dev);

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ioaddr = res->start;
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	/*
	 * The resource size should effectively match the value of SMMU_TOP;
	 * stash that temporarily until we know PAGESIZE to validate it with.
	 */
	smmu->numpage = resource_size(res);

	smmu = arm_smmu_impl_init(smmu);
	if (IS_ERR(smmu))
		return PTR_ERR(smmu);

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			return -ENODEV;
		smmu->irqs[i] = irq;
	}

	err = devm_clk_bulk_get_all(dev, &smmu->clks);
	if (err < 0) {
		dev_err(dev, "failed to get clocks %d\n", err);
		return err;
	}
	smmu->num_clks = err;

	err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
	if (err)
		return err;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2) {
		if (smmu->num_context_banks > smmu->num_context_irqs) {
			dev_err(dev,
				"found only %d context irq(s) but %d required\n",
				smmu->num_context_irqs, smmu->num_context_banks);
			return -ENODEV;
		}

		/* Ignore superfluous interrupts */
		smmu->num_context_irqs = smmu->num_context_banks;
	}

	if (smmu->impl && smmu->impl->global_fault)
		global_fault = smmu->impl->global_fault;
	else
		global_fault = arm_smmu_global_fault;

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &ioaddr);
	if (err) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return err;
	}

	err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
	if (err) {
		dev_err(dev, "Failed to register iommu\n");
		return err;
	}

	platform_set_drvdata(pdev, smmu);
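	/*
	 * arm_smmu_device_reset() programs the stream mapping table and
	 * context banks into a known state and enables the SMMU, while
	 * arm_smmu_test_smr_masks() probes which SMR mask bits are actually
	 * implemented so that stream IDs can later be merged.
	 */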
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/*
	 * We want to avoid touching dev->power.lock in fastpaths unless
	 * it's really going to do something useful - pm_runtime_enabled()
	 * can serve as an ideal proxy for that decision. So, conditionally
	 * enable pm_runtime.
	 */
	if (dev->pm_domain) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	/*
	 * For ACPI and generic DT bindings, an SMMU will be probed before
	 * any device which might need it, so we want the bus ops in place
	 * ready to handle default domain setup as soon as any SMMU exists.
	 */
	if (!using_legacy_binding)
		return arm_smmu_bus_init(&arm_smmu_ops);

	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_notice(&pdev->dev, "disabling translation\n");

	arm_smmu_bus_init(NULL);
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	arm_smmu_rpm_get(smmu);
	/* Turn the thing off */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
	arm_smmu_rpm_put(smmu);

	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_force_suspend(smmu->dev);
	else
		clk_bulk_disable(smmu->num_clks, smmu->clks);

	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
	return 0;
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	arm_smmu_device_remove(pdev);
}

static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	arm_smmu_device_reset(smmu);

	return 0;
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	clk_bulk_disable(smmu->num_clks, smmu->clks);

	return 0;
}

static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_resume(dev);
}

static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_suspend(dev);
}

static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu",
		.of_match_table		= arm_smmu_of_match,
		.pm			= &arm_smmu_pm_ops,
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
MODULE_ALIAS("platform:arm-smmu");
MODULE_LICENSE("GPL v2");