// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom:	generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
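 *
 * A client (currently only the OMAP3 ISP driver) is expected to pair
 * this with omap_iommu_save_ctx(): save the MMU context before the
 * power domain enters off-mode, and restore it here after wake-up.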
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static int iommu_enable(struct omap_iommu *obj)
{
	int ret;

	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);

	return ret < 0 ? ret : 0;
}

static void iommu_disable(struct omap_iommu *obj)
{
	pm_runtime_put_sync(obj->dev);
}

/*
 *	TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
		   (e->pgsz == MMU_CAM_PGSZ_64K)) ?
		  0 : 6);
	return attr;
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
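	/* common exit: drop the PM runtime reference taken above */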
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else	/* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif	/* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 *	H/W pagetable operations
 */
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}

static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed ioptes must be clean, ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * walk the table to check whether it is still needed;
		 * free it if it is now empty
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
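
/*
 * MMU fault ISR: read and ack MMU_IRQSTATUS, give the domain's registered
 * fault handler (via report_iommu_fault()) a chance to fix up the fault,
 * otherwise mask further MMU interrupts and dump the offending page table
 * entries.
 */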
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj:	target omap iommu device
 * @iopgd:	page table
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock lock;
	struct cr_regs cr;
	struct cr_regs *tmp;
	int i;

	/* check if there are any locked tlbs to save */
	iotlb_lock_get(obj, &lock);
	obj->num_cr_ctx = lock.base;
	if (!obj->num_cr_ctx)
		return;

	tmp = obj->cr_ctx;
	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
		*tmp++ = cr;
}

static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock l;
	struct cr_regs *tmp;
	int i;

	/* no locked tlbs to restore */
	if (!obj->num_cr_ctx)
		return;

	l.base = 0;
	tmp = obj->cr_ctx;
	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
}

/**
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain:	iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to suspend
 * the IOMMUs they control at runtime, after they have been idled and
 * have suspended
 * all activity. System Suspend will leverage the PM
 * driver late callbacks.
 **/
int omap_iommu_domain_deactivate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	iommu += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
		oiommu = iommu->iommu_dev;
		pm_runtime_put_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);

/**
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain:	iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 * IOMMUs they control at runtime, before they can resume operations.
 * System Resume will leverage the PM driver late callbacks.
 **/
int omap_iommu_domain_activate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		pm_runtime_get_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);

/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev:	iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. This function also saves the context of any
 * locked TLBs if suspending.
 **/
static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	/* save the TLBs only during suspend, and not for power down */
	if (obj->domain && obj->iopgd)
		omap_iommu_save_tlb_entries(obj);

	omap2_iommu_disable(obj);

	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
		}
	}

	return 0;
}

/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev:	iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. The function also restores any locked TLBs if
 * resuming after a suspend.
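 *
 * This is wired up both as the runtime PM resume callback and, through
 * pm_runtime_force_resume(), as the late system-sleep resume callback
 * in omap_iommu_pm_ops.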
 **/
static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
		}
	}

	if (pdata && pdata->deassert_reset) {
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	/* restore the TLBs only during resume, and not for power up */
	if (obj->domain)
		omap_iommu_restore_tlb_entries(obj);

	ret = omap2_iommu_enable(obj);

	return ret;
}

/**
 * omap_iommu_prepare - prepare() dev_pm_ops implementation
 * @dev:	iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 * device needs suspending or not. The function checks if the runtime_pm
 * status of the device is suspended, and returns 1 in that case. This
 * causes the PM core to skip invoking any of the Sleep PM callbacks
 * (suspend, suspend_late, resume, resume_early etc).
 */
static int omap_iommu_prepare(struct device *dev)
{
	if (pm_runtime_status_suspended(dev))
		return 1;
	return 0;
}

static bool omap_iommu_can_register(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return true;

	/*
	 * restrict IOMMU core registration only for processor-port MDMA MMUs
	 * on DRA7 DSPs
	 */
	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
		return true;

	return false;
}

static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}

/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj =
		devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
		pdev->dev.pm_domain = NULL;
	}

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->cr_ctx = devm_kzalloc(&pdev->dev,
				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
				   GFP_KERNEL);
	if (!obj->cr_ctx)
		return -ENOMEM;

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	if (omap_iommu_can_register(pdev)) {
		obj->group = iommu_group_alloc();
		if (IS_ERR(obj->group))
			return PTR_ERR(obj->group);

		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			goto out_group;

		err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
		if (err)
			goto out_sysfs;
	}

	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	/* Re-probe bus to probe devices attached to this IOMMU */
	bus_iommu_probe(&platform_bus_type);

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	if (obj->group) {
		iommu_group_put(obj->group);
		obj->group = NULL;

		iommu_device_sysfs_remove(&obj->iommu);
		iommu_device_unregister(&obj->iommu);
	}

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static const struct dev_pm_ops omap_iommu_pm_ops = {
	.prepare = omap_iommu_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe = omap_iommu_probe,
	.remove = omap_iommu_remove,
	.driver = {
		.name = "omap-iommu",
		.pm = &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da = da;
	e->pa = pa;
	e->valid = MMU_CAM_V;
	e->pgsz = pgsz;
	e->endian = MMU_RAM_ENDIAN_LITTLE;
	e->elsz = MMU_RAM_ELSZ_8;
	e->mixed = 0;

	return iopgsz_to_bytes(e->pgsz);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret = -EINVAL;
	int i;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		ret = omap_iopgtable_store_entry(oiommu, &e);
		if (ret) {
			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
				ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			iommu--;
			oiommu = iommu->iommu_dev;
			iopgtable_clear_entry(oiommu, da);
		}
	}

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	bool error = false;
	size_t bytes = 0;
	int i;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		bytes = iopgtable_clear_entry(oiommu, da);
		if (!bytes)
			error = true;
	}

	/*
	 * simplify return - we are only checking if any of the iommus
	 * reported an error, but not if all of them are unmapping the
	 * same number of entries. This should not occur due to the
	 * mirror programming.
	 */
	return error ?
		       0 : bytes;
}

static int omap_iommu_count(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	int count = 0;

	while (arch_data->iommu_dev) {
		count++;
		arch_data++;
	}

	return count;
}

/* caller should call cleanup if this function fails */
static int omap_iommu_attach_init(struct device *dev,
				  struct omap_iommu_domain *odomain)
{
	struct omap_iommu_device *iommu;
	int i;

	odomain->num_iommus = omap_iommu_count(dev);
	if (!odomain->num_iommus)
		return -EINVAL;

	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
				  GFP_ATOMIC);
	if (!odomain->iommus)
		return -ENOMEM;

	iommu = odomain->iommus;
	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
		if (!iommu->pgtable)
			return -ENOMEM;

		/*
		 * should never fail, but please keep this around to ensure
		 * we keep the hardware happy
		 */
		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
					IOPGD_TABLE_SIZE)))
			return -EINVAL;
	}

	return 0;
}

static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
{
	int i;
	struct omap_iommu_device *iommu = odomain->iommus;

	for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
		kfree(iommu->pgtable);

	kfree(odomain->iommus);
	odomain->num_iommus = 0;
	odomain->iommus = NULL;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int ret = 0;
	int i;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single client device can be attached to a domain */
	if (omap_domain->dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
			ret);
		goto init_fail;
	}

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
		/* configure and enable the omap iommu */
		oiommu = arch_data->iommu_dev;
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
			goto attach_fail;
		}

		oiommu->domain = domain;
		iommu->iommu_dev = oiommu;
	}

	omap_domain->dev = dev;

	goto out;

attach_fail:
	while (i--) {
		iommu--;
		arch_data--;
		oiommu = iommu->iommu_dev;
		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}
init_fail:
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev) {
		dev_err(dev, "domain has no attached device\n");
		return;
	}

	/* only a single device is supported per domain for now */
	if (omap_domain->dev != dev) {
		dev_err(dev, "invalid attached device\n");
		return;
	}

	/*
	 * cleanup in the reverse order of attachment - this addresses
	 * any h/w dependencies between multiple instances, if any
	 */
	iommu += (omap_domain->num_iommus - 1);
	arch_data += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
		oiommu = iommu->iommu_dev;
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}

	omap_iommu_detach_fini(omap_domain);

	omap_domain->dev = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		return NULL;

	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * Is an iommu device still attached?
	 * (currently, only one device can be attached)
	 */
	if (omap_domain->dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu = iommu->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	/*
	 * all the iommus within the domain will have identical programming,
	 * so perform the lookup using just the first iommu
	 */
	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}

static struct iommu_device *omap_iommu_probe_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data, *tmp;
	struct platform_device *pdev;
	struct omap_iommu *oiommu;
	struct device_node *np;
	int num_iommus, i;

	/*
	 * Allocate the per-device iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return ERR_PTR(-ENODEV);

	/*
	 * retrieve the count of IOMMU nodes using phandle size as element size
	 * since #iommu-cells = 0 for OMAP
	 */
	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
						     sizeof(phandle));
	if (num_iommus < 0)
		return ERR_PTR(-ENODEV);

	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data)
		return ERR_PTR(-ENOMEM);

	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
		np = of_parse_phandle(dev->of_node, "iommus", i);
		if (!np) {
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		pdev = of_find_device_by_node(np);
		if (!pdev) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-ENODEV);
		}

		oiommu = platform_get_drvdata(pdev);
		if (!oiommu) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		tmp->iommu_dev = oiommu;
		tmp->dev = &pdev->dev;

		of_node_put(np);
	}

	dev_iommu_priv_set(dev, arch_data);

	/*
	 * use the first IOMMU alone for the sysfs device linking.
	 * TODO: Evaluate if a single iommu_group needs to be
	 * maintained for both IOMMUs
	 */
	oiommu = arch_data->iommu_dev;

	return &oiommu->iommu;
}

static void omap_iommu_release_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);

	if (!dev->of_node || !arch_data)
		return;

	dev_iommu_priv_set(dev, NULL);
	kfree(arch_data);
}

static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct iommu_group *group = ERR_PTR(-EINVAL);

	if (!arch_data)
		return ERR_PTR(-ENODEV);

	if (arch_data->iommu_dev)
		group = iommu_group_ref_get(arch_data->iommu_dev->group);

	return group;
}

static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc = omap_iommu_domain_alloc,
	.domain_free = omap_iommu_domain_free,
	.attach_dev = omap_iommu_attach_dev,
	.detach_dev = omap_iommu_detach_dev,
	.map = omap_iommu_map,
	.unmap = omap_iommu_unmap,
	.iova_to_phys = omap_iommu_iova_to_phys,
	.probe_device = omap_iommu_probe_device,
	.release_device = omap_iommu_release_device,
	.device_group = omap_iommu_device_group,
	.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      NULL);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
	if (ret)
		goto fail_bus;

	return 0;

fail_bus:
	platform_driver_unregister(&omap_iommu_driver);
fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */