// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	return fwspec ? fwspec->iommu_priv : NULL;
}
#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Register Definitions
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */

/* -----------------------------------------------------------------------------
 * Root device handling
 */
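/*
 * On R-Car Gen3 the IPMMU is split into per-bus-master cache instances and a
 * root IPMMU-MM instance that holds the translation contexts and performs the
 * page table walks. Cache instances reference their root through the
 * "renesas,ipmmu-main" DT property; on R-Car Gen2 (no cache leaf nodes) every
 * instance is its own root. The helpers below locate the root instance that a
 * leaf instance is tied to.
 */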
static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
				"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}
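/*
 * Each bus master is wired to one of the IPMMU's microTLBs; the DT "iommus"
 * specifier cell carries the microTLB index. Assigning a microTLB to a
 * translation context is done by programming IMUCTR.TTSEL below, and the
 * assignment is recorded in mmu->utlb_ctx[] so it can be re-established on
 * system resume.
 */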
/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
			   IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
	.tlb_flush_leaf = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}
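/*
 * Program the context registers from the io-pgtable configuration. This is
 * called both when a domain is first initialized and from the resume path,
 * where the hardware context has to be reprogrammed from scratch.
 */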
static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */
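/*
 * The fault interrupt is only requested for root instances (see ipmmu_probe())
 * and is shared by all hardware contexts, so ipmmu_irq() below checks the
 * status of every active context and delegates to ipmmu_domain_irq().
 */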
static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}
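/*
 * The first device attached to a domain binds the domain to that device's
 * IPMMU instance and allocates a hardware context in the root IPMMU; later
 * attaches may only add devices that sit behind the same IPMMU instance.
 */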
static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};
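/*
 * Devices are opted in by adding their dev_name() to the list above. While
 * the list is empty, ipmmu_slave_whitelist() below rejects every slave device
 * on the whitelisted R-Car Gen3 SoCs, leaving the IPMMU unused there.
 */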
static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3 use a white list to opt-in slave devices.
	 * For other SoCs, this returns true anyway.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 can use the IPMMU correctly or not */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() may be called multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}
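/*
 * A bus master typically references the IPMMU with a single specifier cell
 * holding its microTLB index, which ipmmu_of_xlate() records via
 * iommu_fwspec_add_ids(). A hypothetical DT fragment (node and label names
 * are illustrative only):
 *
 *	vsp@fe920000 {
 *		...
 *		iommus = <&ipmmu_vi0 8>;
 *	};
 */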
static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!mmu)
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = ipmmu_init_arm_mapping(dev);
		if (ret)
			return ret;
	} else {
		group = iommu_group_get_for_dev(dev);
		if (IS_ERR(group))
			return PTR_ERR(group);

		iommu_group_put(group);
	}

	iommu_device_link(&mmu->iommu, dev);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	iommu_device_unlink(&mmu->iommu, dev);
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */
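/*
 * ipmmu_device_reset() is called for root instances at probe time and again
 * from the noirq resume handler, before the active contexts and microTLBs
 * are reprogrammed.
 */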
static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};
static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}
static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);