// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014-2020 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		16U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		64U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	unsigned int base = mmu->features->ctx_offset_base;

	if (context_id > 7)
		base += 0x800 - 8 * 0x40;

	return base + context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t pgsize, size_t pgcount,
		     int prot, gfp_t gfp, size_t *mapped)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount,
				      prot, gfp, mapped);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t pgsize, size_t pgcount,
			  struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_needs_opt_in[] = {
	{ .family = "R-Car Gen3", },
	{ .family = "R-Car Gen4", },
	{ .family = "RZ/G2", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_denylist[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a7795", .revision = "ES2.*" },
	{ .soc_id = "r8a7796", },
	{ /* sentinel */ }
};

static const char * const devices_allowlist[] = {
	"ee100000.mmc",
	"ee120000.mmc",
	"ee140000.mmc",
	"ee160000.mmc"
};

static bool ipmmu_device_is_allowed(struct device *dev)
{
	unsigned int i;

	/*
	 * R-Car Gen3/4 and RZ/G2 use the allow list to opt-in devices.
	 * For Other SoCs, this returns true anyway.
	 */
	if (!soc_device_match(soc_needs_opt_in))
		return true;

	/* Check whether this SoC can use the IPMMU correctly or not */
	if (soc_device_match(soc_denylist))
		return false;

	/* Check whether this device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
		if (!strcmp(dev_name(dev), devices_allowlist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_device_is_allowed(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will call multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		unsigned int utlb = fwspec->ids[i];

		ipmmu_imuctr_write(mmu, utlb, 0);
		mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
	}

	arm_iommu_release_mapping(mmu->mapping);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= ipmmu_attach_device,
		.map_pages	= ipmmu_map,
		.unmap_pages	= ipmmu_unmap,
		.flush_iotlb_all = ipmmu_flush_iotlb_all,
		.iotlb_sync	= ipmmu_iotlb_sync,
		.iova_to_phys	= ipmmu_iova_to_phys,
		.free		= ipmmu_domain_free,
	}
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen4 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 16,
	.num_utlbs = 64,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0x10000,
	.ctx_offset_stride = 0x1040,
	.utlb_offset_base = 0x3000,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774e1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77961",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77980",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a779a0",
		.data = &ipmmu_features_rcar_gen4,
	}, {
		.compatible = "renesas,rcar-gen4-ipmmu-vmsa",
		.data = &ipmmu_features_rcar_gen4,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_property_present(pdev->dev.of_node, "renesas,ipmmu-main"))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
		if (ret)
			return ret;
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static void ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);
}

#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove_new = ipmmu_remove,
};
builtin_platform_driver(ipmmu_driver);