// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014-2020 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */

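/*
 * Note on the uTLB macro arithmetic above: each uTLB uses a 16-byte register
 * stride, so IMUCTR(0) resolves to 0x0300 and IMUASID(0) to 0x0308, while
 * uTLB 33 (R-Car Gen3 only) falls into the second bank at
 * IMUCTR(33) = 0x0610 and IMUASID(33) = 0x0618.
 */
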
/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

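/*
 * Context registers are addressed as ctx_offset_base +
 * context_id * ctx_offset_stride + reg. With the feature tables below
 * (base 0, stride 0x40), context 2's IMTTBCR for instance lives at
 * 2 * 0x40 + 0x08 = 0x88. The *_root helpers always target the root IPMMU,
 * which owns the translation contexts, while ipmmu_ctx_write_all()
 * additionally mirrors the write to the leaf (cache) IPMMU the domain is
 * attached to.
 */
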
static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
			"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

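/*
 * io-pgtable TLB callbacks. The driver only implements a whole-context flush
 * (IMCTR_FLUSH), so ranged invalidations requested by the page table code
 * are turned into a full flush of the context.
 */
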
static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

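/*
 * ipmmu_domain_setup_context() only (re)programs registers and allocates
 * nothing, which is why ipmmu_resume_noirq() also uses it to restore live
 * contexts after a system suspend.
 */
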
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

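/*
 * Interrupt handler registered for root IPMMU instances: walk every active
 * context under mmu->lock and let ipmmu_domain_irq() inspect and clear each
 * context's IMSTR status.
 */
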
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

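/*
 * The first attach binds the domain to the device's IPMMU instance and
 * allocates a hardware context on the root IPMMU; subsequent attaches behind
 * the same instance only enable the device's uTLBs. Mixing devices that sit
 * behind different IPMMU instances in one domain is rejected with -EINVAL.
 */
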
static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a774e1", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a77961", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a774e1", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77961", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};

static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3 use a white list to opt-in slave devices.
	 * For other SoCs, this returns true anyway.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 can use the IPMMU correctly or not */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will call multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

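/*
 * On 32-bit ARM without CONFIG_IOMMU_DMA the DMA API glue comes from the ARM
 * dma-mapping layer (see ipmmu_init_arm_mapping()), so generic per-device
 * groups are used; in all other configurations every master behind an IPMMU
 * instance shares the single group allocated by ipmmu_find_group().
 */
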
static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774e1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77961",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
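	/*
	 * The page table walker uses 40-bit output addresses (see cfg.oas in
	 * ipmmu_domain_init_context()); set a matching DMA mask so the
	 * io-pgtable code may place page tables anywhere in that range.
	 */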
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
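/*
 * Assume the IPMMU may have lost its register state over suspend: reset the
 * root instance, reprogram every context still recorded in mmu->domains[],
 * then re-enable the uTLBs tracked in utlb_ctx[]. No suspend callback is
 * needed as the state required for this lives in memory.
 */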
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm  = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove	= ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);