// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *	Paul Mundt and Toshihiro Kobayashi
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

static const struct iommu_ops omap_iommu_ops;

/* the omap_iommu object is stored as driver data on the IOMMU platform device */
#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/*
 * MMU_LOCK register fields: "base" is the number of locked (preserved)
 * TLB entries at the bottom of the TLB, "vict" is the victim index the
 * next TLB load/read operates on.
 */
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

static struct platform_driver omap_iommu_driver;
/* slab cache for second-level (L2) page tables */
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev: client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for OMAP3 ISP driver.
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	/*
	 * arch_data is an array terminated by a NULL iommu_dev entry;
	 * snapshot every MMU register of each IOMMU into obj->ctx.
	 */
	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev: client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for OMAP3 ISP driver.
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	/* mirror image of omap_iommu_save_ctx(): write the snapshot back */
	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

/* enable/disable a DSP MMU through the DSP_SYS_MMU_CONFIG syscon (DRA7 only) */
static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	/* only DRA7 DSP MMUs have a syscfg regmap; no-op for everything else */
	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ?
	      mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

/* enable the MMU, with or without the hardware table-walking logic (TWL) */
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	/* with TWL disabled, only TLB-miss interrupts are of interest */
	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

/* program the TTB and enable the MMU; requires a 16KB-aligned L1 table */
static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

/*
 * power up the MMU via runtime PM; the actual register programming
 * happens in the runtime_resume callback
 */
static int iommu_enable(struct omap_iommu *obj)
{
	int ret;

	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);

	return ret < 0 ?
	       ret : 0;
}

static void iommu_disable(struct omap_iommu *obj)
{
	pm_runtime_put_sync(obj->dev);
}

/*
 * TLB operations
 */
/* extract the virtual address tag of a TLB entry, masked per its page size */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

/* translate an iotlb_entry's attributes into page-table attribute bits */
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	/* section/supersection entries carry these bits in a higher field */
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
		   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}

/*
 * read and acknowledge the pending fault status; returns 0 when no fault
 * is latched, otherwise the masked IRQ status with *da set to the fault
 * address
 */
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	/* writing the status bits back acks the interrupt */
	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

/* read the TLB entry currently selected by the victim index */
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

/* load @cr into the TLB at the current victim index, marking it valid */
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	/* point the victim index at entry @n, then read that entry back */
	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

#ifdef PREFETCH_IOTLB
/* build a cam/ram register pair for @e; caller must free the result */
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	/* da must be aligned to the page size being mapped */
	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		/* scan for a free (invalid) entry for a non-preserved mapping */
		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		/* preserved entries are loaded at the current base index */
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

/* TLB prefetching disabled: rely entirely on hardware table walks */
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			/* reload the entry at the victim index, then flush it */
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	/* reset the lock so no entries survive the global flush */
	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
/* sync @num_entries page-table entries at @offset out to the device */
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset,
			      int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}

/* free an L2 table, unmapping its DMA mapping first when @dma_valid is set */
static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}

/*
 * get (allocating on demand) the L2 table behind @iopgd and return the
 * pte slot for @da; called with obj->page_table_lock held
 */
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	/* re-check: another thread may have installed a table meanwhile */
	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}
pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

/* install a 1MB section mapping directly in the L1 table */
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}

/* install a 16MB supersection: 16 identical consecutive L1 entries */
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}

/* install a 4KB small-page mapping in the (possibly freshly allocated) L2 table */
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

/* install a 64KB large-page mapping: 16 identical consecutive L2 entries */
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}

/* dispatch to the page-size-specific installer and write the entry */
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	/* flush any stale TLB entry before touching the page table */
	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	/* *ppte stays NULL for section/supersection (non-table) L1 entries */
	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

/*
 * clear the page-table entry covering @da; returns the size that was
 * unmapped in bytes (0 if nothing was mapped); caller holds
 * obj->page_table_lock
 */
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			/* a 64KB large page occupies 16 L2 entries */
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		/* L2 table is now empty: free it and clear its L1 entry too */
		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			/* a 16MB supersection occupies 16 L1 entries */
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes =
iopgtable_clear_entry_core(obj, da); 765 flush_iotlb_page(obj, da); 766 767 spin_unlock(&obj->page_table_lock); 768 769 return bytes; 770 } 771 772 static void iopgtable_clear_entry_all(struct omap_iommu *obj) 773 { 774 unsigned long offset; 775 int i; 776 777 spin_lock(&obj->page_table_lock); 778 779 for (i = 0; i < PTRS_PER_IOPGD; i++) { 780 u32 da; 781 u32 *iopgd; 782 783 da = i << IOPGD_SHIFT; 784 iopgd = iopgd_offset(obj, da); 785 offset = iopgd_index(da) * sizeof(da); 786 787 if (!*iopgd) 788 continue; 789 790 if (iopgd_is_table(*iopgd)) 791 iopte_free(obj, iopte_offset(iopgd, 0), true); 792 793 *iopgd = 0; 794 flush_iopte_range(obj->dev, obj->pd_dma, offset, 1); 795 } 796 797 flush_iotlb_all(obj); 798 799 spin_unlock(&obj->page_table_lock); 800 } 801 802 /* 803 * Device IOMMU generic operations 804 */ 805 static irqreturn_t iommu_fault_handler(int irq, void *data) 806 { 807 u32 da, errs; 808 u32 *iopgd, *iopte; 809 struct omap_iommu *obj = data; 810 struct iommu_domain *domain = obj->domain; 811 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 812 813 if (!omap_domain->dev) 814 return IRQ_NONE; 815 816 errs = iommu_report_fault(obj, &da); 817 if (errs == 0) 818 return IRQ_HANDLED; 819 820 /* Fault callback or TLB/PTE Dynamic loading */ 821 if (!report_iommu_fault(domain, obj->dev, da, 0)) 822 return IRQ_HANDLED; 823 824 iommu_write_reg(obj, 0, MMU_IRQENABLE); 825 826 iopgd = iopgd_offset(obj, da); 827 828 if (!iopgd_is_table(*iopgd)) { 829 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n", 830 obj->name, errs, da, iopgd, *iopgd); 831 return IRQ_NONE; 832 } 833 834 iopte = iopte_offset(iopgd, da); 835 836 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n", 837 obj->name, errs, da, iopgd, *iopgd, iopte, *iopte); 838 839 return IRQ_NONE; 840 } 841 842 /** 843 * omap_iommu_attach() - attach iommu device to an iommu domain 844 * @obj: target omap iommu device 845 * @iopgd: page table 846 **/ 
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	/* map the L1 table for device-visible coherency */
	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}

/**
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

/* save any locked TLB entries into obj->cr_ctx ahead of a suspend */
static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock lock;
	struct cr_regs cr;
	struct cr_regs *tmp;
	int i;

	/* check if there are any locked tlbs to save */
	iotlb_lock_get(obj, &lock);
	obj->num_cr_ctx = lock.base;
	if (!obj->num_cr_ctx)
		return;

	tmp = obj->cr_ctx;
	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
		*tmp++ = cr;
}

/* reload the TLB entries saved by omap_iommu_save_tlb_entries() */
static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock l;
	struct cr_regs *tmp;
	int i;

	/* no locked tlbs to restore */
	if (!obj->num_cr_ctx)
		return;

	l.base = 0;
	tmp = obj->cr_ctx;
	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
	/* re-lock the restored entries */
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
}

/**
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to suspend
 * the IOMMUs they control at runtime, after they are idled and
 * suspended all activity. System Suspend will leverage the PM
 * driver late callbacks.
 **/
int omap_iommu_domain_deactivate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	/* suspend the IOMMUs in reverse order of their activation */
	iommu = omap_domain->iommus;
	iommu += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
		oiommu = iommu->iommu_dev;
		pm_runtime_put_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);

/**
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 * IOMMUs they control at runtime, before they can resume operations.
 * System Resume will leverage the PM driver late callbacks.
 **/
int omap_iommu_domain_activate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		pm_runtime_get_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);

/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. This function also saves the context of any
 * locked TLBs if suspending.
 **/
static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	/* save the TLBs only during suspend, and not for power down */
	if (obj->domain && obj->iopgd)
		omap_iommu_save_tlb_entries(obj);

	omap2_iommu_disable(obj);

	/* pdata hooks are optional legacy (non-DT hwmod) callbacks */
	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret) {
			/* constraint failure is non-fatal; only warn */
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
		}
	}

	return 0;
}

/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. The function also restores any locked TLBs if
 * resuming after a suspend.
 **/
static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	/* pdata hooks are optional legacy (non-DT hwmod) callbacks */
	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret) {
			/* constraint failure is non-fatal; only warn */
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
		}
	}

	if (pdata && pdata->deassert_reset) {
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	/* restore the TLBs only during resume, and not for power up */
	if (obj->domain)
		omap_iommu_restore_tlb_entries(obj);

	ret = omap2_iommu_enable(obj);

	return ret;
}

/**
 * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
 * @dev: iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 * device needs suspending or not. The function checks if the runtime_pm
 * status of the device is suspended, and returns 1 in that case. This
 * results in the PM core to skip invoking any of the Sleep PM callbacks
 * (suspend, suspend_late, resume, resume_early etc).
1097 */ 1098 static int omap_iommu_prepare(struct device *dev) 1099 { 1100 if (pm_runtime_status_suspended(dev)) 1101 return 1; 1102 return 0; 1103 } 1104 1105 static bool omap_iommu_can_register(struct platform_device *pdev) 1106 { 1107 struct device_node *np = pdev->dev.of_node; 1108 1109 if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu")) 1110 return true; 1111 1112 /* 1113 * restrict IOMMU core registration only for processor-port MDMA MMUs 1114 * on DRA7 DSPs 1115 */ 1116 if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) || 1117 (!strcmp(dev_name(&pdev->dev), "41501000.mmu"))) 1118 return true; 1119 1120 return false; 1121 } 1122 1123 static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev, 1124 struct omap_iommu *obj) 1125 { 1126 struct device_node *np = pdev->dev.of_node; 1127 int ret; 1128 1129 if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu")) 1130 return 0; 1131 1132 if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) { 1133 dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n"); 1134 return -EINVAL; 1135 } 1136 1137 obj->syscfg = 1138 syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig"); 1139 if (IS_ERR(obj->syscfg)) { 1140 /* can fail with -EPROBE_DEFER */ 1141 ret = PTR_ERR(obj->syscfg); 1142 return ret; 1143 } 1144 1145 if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1, 1146 &obj->id)) { 1147 dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n"); 1148 return -EINVAL; 1149 } 1150 1151 if (obj->id != 0 && obj->id != 1) { 1152 dev_err(&pdev->dev, "invalid IOMMU instance id\n"); 1153 return -EINVAL; 1154 } 1155 1156 return 0; 1157 } 1158 1159 /* 1160 * OMAP Device MMU(IOMMU) detection 1161 */ 1162 static int omap_iommu_probe(struct platform_device *pdev) 1163 { 1164 int err = -ENODEV; 1165 int irq; 1166 struct omap_iommu *obj; 1167 struct resource *res; 1168 struct device_node *of = pdev->dev.of_node; 1169 1170 if (!of) { 1171 pr_err("%s: only DT-based devices are 
supported\n", __func__); 1172 return -ENODEV; 1173 } 1174 1175 obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); 1176 if (!obj) 1177 return -ENOMEM; 1178 1179 /* 1180 * self-manage the ordering dependencies between omap_device_enable/idle 1181 * and omap_device_assert/deassert_hardreset API 1182 */ 1183 if (pdev->dev.pm_domain) { 1184 dev_dbg(&pdev->dev, "device pm_domain is being reset\n"); 1185 pdev->dev.pm_domain = NULL; 1186 } 1187 1188 obj->name = dev_name(&pdev->dev); 1189 obj->nr_tlb_entries = 32; 1190 err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries); 1191 if (err && err != -EINVAL) 1192 return err; 1193 if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) 1194 return -EINVAL; 1195 if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) 1196 obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; 1197 1198 obj->dev = &pdev->dev; 1199 obj->ctx = (void *)obj + sizeof(*obj); 1200 obj->cr_ctx = devm_kzalloc(&pdev->dev, 1201 sizeof(*obj->cr_ctx) * obj->nr_tlb_entries, 1202 GFP_KERNEL); 1203 if (!obj->cr_ctx) 1204 return -ENOMEM; 1205 1206 spin_lock_init(&obj->iommu_lock); 1207 spin_lock_init(&obj->page_table_lock); 1208 1209 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1210 obj->regbase = devm_ioremap_resource(obj->dev, res); 1211 if (IS_ERR(obj->regbase)) 1212 return PTR_ERR(obj->regbase); 1213 1214 err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj); 1215 if (err) 1216 return err; 1217 1218 irq = platform_get_irq(pdev, 0); 1219 if (irq < 0) 1220 return -ENODEV; 1221 1222 err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED, 1223 dev_name(obj->dev), obj); 1224 if (err < 0) 1225 return err; 1226 platform_set_drvdata(pdev, obj); 1227 1228 if (omap_iommu_can_register(pdev)) { 1229 obj->group = iommu_group_alloc(); 1230 if (IS_ERR(obj->group)) 1231 return PTR_ERR(obj->group); 1232 1233 err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, 1234 obj->name); 1235 if (err) 1236 goto 
out_group; 1237 1238 iommu_device_set_ops(&obj->iommu, &omap_iommu_ops); 1239 1240 err = iommu_device_register(&obj->iommu); 1241 if (err) 1242 goto out_sysfs; 1243 } 1244 1245 pm_runtime_enable(obj->dev); 1246 1247 omap_iommu_debugfs_add(obj); 1248 1249 dev_info(&pdev->dev, "%s registered\n", obj->name); 1250 1251 /* Re-probe bus to probe device attached to this IOMMU */ 1252 bus_iommu_probe(&platform_bus_type); 1253 1254 return 0; 1255 1256 out_sysfs: 1257 iommu_device_sysfs_remove(&obj->iommu); 1258 out_group: 1259 iommu_group_put(obj->group); 1260 return err; 1261 } 1262 1263 static int omap_iommu_remove(struct platform_device *pdev) 1264 { 1265 struct omap_iommu *obj = platform_get_drvdata(pdev); 1266 1267 if (obj->group) { 1268 iommu_group_put(obj->group); 1269 obj->group = NULL; 1270 1271 iommu_device_sysfs_remove(&obj->iommu); 1272 iommu_device_unregister(&obj->iommu); 1273 } 1274 1275 omap_iommu_debugfs_remove(obj); 1276 1277 pm_runtime_disable(obj->dev); 1278 1279 dev_info(&pdev->dev, "%s removed\n", obj->name); 1280 return 0; 1281 } 1282 1283 static const struct dev_pm_ops omap_iommu_pm_ops = { 1284 .prepare = omap_iommu_prepare, 1285 SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1286 pm_runtime_force_resume) 1287 SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend, 1288 omap_iommu_runtime_resume, NULL) 1289 }; 1290 1291 static const struct of_device_id omap_iommu_of_match[] = { 1292 { .compatible = "ti,omap2-iommu" }, 1293 { .compatible = "ti,omap4-iommu" }, 1294 { .compatible = "ti,dra7-iommu" }, 1295 { .compatible = "ti,dra7-dsp-iommu" }, 1296 {}, 1297 }; 1298 1299 static struct platform_driver omap_iommu_driver = { 1300 .probe = omap_iommu_probe, 1301 .remove = omap_iommu_remove, 1302 .driver = { 1303 .name = "omap-iommu", 1304 .pm = &omap_iommu_pm_ops, 1305 .of_match_table = of_match_ptr(omap_iommu_of_match), 1306 }, 1307 }; 1308 1309 static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) 1310 { 1311 memset(e, 0, 
sizeof(*e)); 1312 1313 e->da = da; 1314 e->pa = pa; 1315 e->valid = MMU_CAM_V; 1316 e->pgsz = pgsz; 1317 e->endian = MMU_RAM_ENDIAN_LITTLE; 1318 e->elsz = MMU_RAM_ELSZ_8; 1319 e->mixed = 0; 1320 1321 return iopgsz_to_bytes(e->pgsz); 1322 } 1323 1324 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, 1325 phys_addr_t pa, size_t bytes, int prot, gfp_t gfp) 1326 { 1327 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1328 struct device *dev = omap_domain->dev; 1329 struct omap_iommu_device *iommu; 1330 struct omap_iommu *oiommu; 1331 struct iotlb_entry e; 1332 int omap_pgsz; 1333 u32 ret = -EINVAL; 1334 int i; 1335 1336 omap_pgsz = bytes_to_iopgsz(bytes); 1337 if (omap_pgsz < 0) { 1338 dev_err(dev, "invalid size to map: %zu\n", bytes); 1339 return -EINVAL; 1340 } 1341 1342 dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes); 1343 1344 iotlb_init_entry(&e, da, pa, omap_pgsz); 1345 1346 iommu = omap_domain->iommus; 1347 for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { 1348 oiommu = iommu->iommu_dev; 1349 ret = omap_iopgtable_store_entry(oiommu, &e); 1350 if (ret) { 1351 dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", 1352 ret); 1353 break; 1354 } 1355 } 1356 1357 if (ret) { 1358 while (i--) { 1359 iommu--; 1360 oiommu = iommu->iommu_dev; 1361 iopgtable_clear_entry(oiommu, da); 1362 } 1363 } 1364 1365 return ret; 1366 } 1367 1368 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, 1369 size_t size, struct iommu_iotlb_gather *gather) 1370 { 1371 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1372 struct device *dev = omap_domain->dev; 1373 struct omap_iommu_device *iommu; 1374 struct omap_iommu *oiommu; 1375 bool error = false; 1376 size_t bytes = 0; 1377 int i; 1378 1379 dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size); 1380 1381 iommu = omap_domain->iommus; 1382 for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { 1383 oiommu = iommu->iommu_dev; 
1384 bytes = iopgtable_clear_entry(oiommu, da); 1385 if (!bytes) 1386 error = true; 1387 } 1388 1389 /* 1390 * simplify return - we are only checking if any of the iommus 1391 * reported an error, but not if all of them are unmapping the 1392 * same number of entries. This should not occur due to the 1393 * mirror programming. 1394 */ 1395 return error ? 0 : bytes; 1396 } 1397 1398 static int omap_iommu_count(struct device *dev) 1399 { 1400 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; 1401 int count = 0; 1402 1403 while (arch_data->iommu_dev) { 1404 count++; 1405 arch_data++; 1406 } 1407 1408 return count; 1409 } 1410 1411 /* caller should call cleanup if this function fails */ 1412 static int omap_iommu_attach_init(struct device *dev, 1413 struct omap_iommu_domain *odomain) 1414 { 1415 struct omap_iommu_device *iommu; 1416 int i; 1417 1418 odomain->num_iommus = omap_iommu_count(dev); 1419 if (!odomain->num_iommus) 1420 return -EINVAL; 1421 1422 odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu), 1423 GFP_ATOMIC); 1424 if (!odomain->iommus) 1425 return -ENOMEM; 1426 1427 iommu = odomain->iommus; 1428 for (i = 0; i < odomain->num_iommus; i++, iommu++) { 1429 iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC); 1430 if (!iommu->pgtable) 1431 return -ENOMEM; 1432 1433 /* 1434 * should never fail, but please keep this around to ensure 1435 * we keep the hardware happy 1436 */ 1437 if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable, 1438 IOPGD_TABLE_SIZE))) 1439 return -EINVAL; 1440 } 1441 1442 return 0; 1443 } 1444 1445 static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain) 1446 { 1447 int i; 1448 struct omap_iommu_device *iommu = odomain->iommus; 1449 1450 for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++) 1451 kfree(iommu->pgtable); 1452 1453 kfree(odomain->iommus); 1454 odomain->num_iommus = 0; 1455 odomain->iommus = NULL; 1456 } 1457 1458 static int 1459 omap_iommu_attach_dev(struct iommu_domain *domain, struct 
device *dev) 1460 { 1461 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1462 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; 1463 struct omap_iommu_device *iommu; 1464 struct omap_iommu *oiommu; 1465 int ret = 0; 1466 int i; 1467 1468 if (!arch_data || !arch_data->iommu_dev) { 1469 dev_err(dev, "device doesn't have an associated iommu\n"); 1470 return -EINVAL; 1471 } 1472 1473 spin_lock(&omap_domain->lock); 1474 1475 /* only a single client device can be attached to a domain */ 1476 if (omap_domain->dev) { 1477 dev_err(dev, "iommu domain is already attached\n"); 1478 ret = -EBUSY; 1479 goto out; 1480 } 1481 1482 ret = omap_iommu_attach_init(dev, omap_domain); 1483 if (ret) { 1484 dev_err(dev, "failed to allocate required iommu data %d\n", 1485 ret); 1486 goto init_fail; 1487 } 1488 1489 iommu = omap_domain->iommus; 1490 for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) { 1491 /* configure and enable the omap iommu */ 1492 oiommu = arch_data->iommu_dev; 1493 ret = omap_iommu_attach(oiommu, iommu->pgtable); 1494 if (ret) { 1495 dev_err(dev, "can't get omap iommu: %d\n", ret); 1496 goto attach_fail; 1497 } 1498 1499 oiommu->domain = domain; 1500 iommu->iommu_dev = oiommu; 1501 } 1502 1503 omap_domain->dev = dev; 1504 1505 goto out; 1506 1507 attach_fail: 1508 while (i--) { 1509 iommu--; 1510 arch_data--; 1511 oiommu = iommu->iommu_dev; 1512 omap_iommu_detach(oiommu); 1513 iommu->iommu_dev = NULL; 1514 oiommu->domain = NULL; 1515 } 1516 init_fail: 1517 omap_iommu_detach_fini(omap_domain); 1518 out: 1519 spin_unlock(&omap_domain->lock); 1520 return ret; 1521 } 1522 1523 static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, 1524 struct device *dev) 1525 { 1526 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; 1527 struct omap_iommu_device *iommu = omap_domain->iommus; 1528 struct omap_iommu *oiommu; 1529 int i; 1530 1531 if (!omap_domain->dev) { 1532 dev_err(dev, "domain has no attached 
device\n"); 1533 return; 1534 } 1535 1536 /* only a single device is supported per domain for now */ 1537 if (omap_domain->dev != dev) { 1538 dev_err(dev, "invalid attached device\n"); 1539 return; 1540 } 1541 1542 /* 1543 * cleanup in the reverse order of attachment - this addresses 1544 * any h/w dependencies between multiple instances, if any 1545 */ 1546 iommu += (omap_domain->num_iommus - 1); 1547 arch_data += (omap_domain->num_iommus - 1); 1548 for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) { 1549 oiommu = iommu->iommu_dev; 1550 iopgtable_clear_entry_all(oiommu); 1551 1552 omap_iommu_detach(oiommu); 1553 iommu->iommu_dev = NULL; 1554 oiommu->domain = NULL; 1555 } 1556 1557 omap_iommu_detach_fini(omap_domain); 1558 1559 omap_domain->dev = NULL; 1560 } 1561 1562 static void omap_iommu_detach_dev(struct iommu_domain *domain, 1563 struct device *dev) 1564 { 1565 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1566 1567 spin_lock(&omap_domain->lock); 1568 _omap_iommu_detach_dev(omap_domain, dev); 1569 spin_unlock(&omap_domain->lock); 1570 } 1571 1572 static struct iommu_domain *omap_iommu_domain_alloc(unsigned type) 1573 { 1574 struct omap_iommu_domain *omap_domain; 1575 1576 if (type != IOMMU_DOMAIN_UNMANAGED) 1577 return NULL; 1578 1579 omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); 1580 if (!omap_domain) 1581 return NULL; 1582 1583 spin_lock_init(&omap_domain->lock); 1584 1585 omap_domain->domain.geometry.aperture_start = 0; 1586 omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1; 1587 omap_domain->domain.geometry.force_aperture = true; 1588 1589 return &omap_domain->domain; 1590 } 1591 1592 static void omap_iommu_domain_free(struct iommu_domain *domain) 1593 { 1594 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1595 1596 /* 1597 * An iommu device is still attached 1598 * (currently, only one device can be attached) ? 
1599 */ 1600 if (omap_domain->dev) 1601 _omap_iommu_detach_dev(omap_domain, omap_domain->dev); 1602 1603 kfree(omap_domain); 1604 } 1605 1606 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, 1607 dma_addr_t da) 1608 { 1609 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); 1610 struct omap_iommu_device *iommu = omap_domain->iommus; 1611 struct omap_iommu *oiommu = iommu->iommu_dev; 1612 struct device *dev = oiommu->dev; 1613 u32 *pgd, *pte; 1614 phys_addr_t ret = 0; 1615 1616 /* 1617 * all the iommus within the domain will have identical programming, 1618 * so perform the lookup using just the first iommu 1619 */ 1620 iopgtable_lookup_entry(oiommu, da, &pgd, &pte); 1621 1622 if (pte) { 1623 if (iopte_is_small(*pte)) 1624 ret = omap_iommu_translate(*pte, da, IOPTE_MASK); 1625 else if (iopte_is_large(*pte)) 1626 ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); 1627 else 1628 dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte, 1629 (unsigned long long)da); 1630 } else { 1631 if (iopgd_is_section(*pgd)) 1632 ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); 1633 else if (iopgd_is_super(*pgd)) 1634 ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); 1635 else 1636 dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd, 1637 (unsigned long long)da); 1638 } 1639 1640 return ret; 1641 } 1642 1643 static struct iommu_device *omap_iommu_probe_device(struct device *dev) 1644 { 1645 struct omap_iommu_arch_data *arch_data, *tmp; 1646 struct platform_device *pdev; 1647 struct omap_iommu *oiommu; 1648 struct device_node *np; 1649 int num_iommus, i; 1650 1651 /* 1652 * Allocate the archdata iommu structure for DT-based devices. 1653 * 1654 * TODO: Simplify this when removing non-DT support completely from the 1655 * IOMMU users. 
1656 */ 1657 if (!dev->of_node) 1658 return ERR_PTR(-ENODEV); 1659 1660 /* 1661 * retrieve the count of IOMMU nodes using phandle size as element size 1662 * since #iommu-cells = 0 for OMAP 1663 */ 1664 num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus", 1665 sizeof(phandle)); 1666 if (num_iommus < 0) 1667 return 0; 1668 1669 arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL); 1670 if (!arch_data) 1671 return ERR_PTR(-ENOMEM); 1672 1673 for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) { 1674 np = of_parse_phandle(dev->of_node, "iommus", i); 1675 if (!np) { 1676 kfree(arch_data); 1677 return ERR_PTR(-EINVAL); 1678 } 1679 1680 pdev = of_find_device_by_node(np); 1681 if (!pdev) { 1682 of_node_put(np); 1683 kfree(arch_data); 1684 return ERR_PTR(-ENODEV); 1685 } 1686 1687 oiommu = platform_get_drvdata(pdev); 1688 if (!oiommu) { 1689 of_node_put(np); 1690 kfree(arch_data); 1691 return ERR_PTR(-EINVAL); 1692 } 1693 1694 tmp->iommu_dev = oiommu; 1695 tmp->dev = &pdev->dev; 1696 1697 of_node_put(np); 1698 } 1699 1700 dev->archdata.iommu = arch_data; 1701 1702 /* 1703 * use the first IOMMU alone for the sysfs device linking. 
1704 * TODO: Evaluate if a single iommu_group needs to be 1705 * maintained for both IOMMUs 1706 */ 1707 oiommu = arch_data->iommu_dev; 1708 1709 return &oiommu->iommu; 1710 } 1711 1712 static void omap_iommu_release_device(struct device *dev) 1713 { 1714 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; 1715 1716 if (!dev->of_node || !arch_data) 1717 return; 1718 1719 dev->archdata.iommu = NULL; 1720 kfree(arch_data); 1721 1722 } 1723 1724 static struct iommu_group *omap_iommu_device_group(struct device *dev) 1725 { 1726 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; 1727 struct iommu_group *group = ERR_PTR(-EINVAL); 1728 1729 if (!arch_data) 1730 return ERR_PTR(-ENODEV); 1731 1732 if (arch_data->iommu_dev) 1733 group = iommu_group_ref_get(arch_data->iommu_dev->group); 1734 1735 return group; 1736 } 1737 1738 static const struct iommu_ops omap_iommu_ops = { 1739 .domain_alloc = omap_iommu_domain_alloc, 1740 .domain_free = omap_iommu_domain_free, 1741 .attach_dev = omap_iommu_attach_dev, 1742 .detach_dev = omap_iommu_detach_dev, 1743 .map = omap_iommu_map, 1744 .unmap = omap_iommu_unmap, 1745 .iova_to_phys = omap_iommu_iova_to_phys, 1746 .probe_device = omap_iommu_probe_device, 1747 .release_device = omap_iommu_release_device, 1748 .device_group = omap_iommu_device_group, 1749 .pgsize_bitmap = OMAP_IOMMU_PGSIZES, 1750 }; 1751 1752 static int __init omap_iommu_init(void) 1753 { 1754 struct kmem_cache *p; 1755 const slab_flags_t flags = SLAB_HWCACHE_ALIGN; 1756 size_t align = 1 << 10; /* L2 pagetable alignement */ 1757 struct device_node *np; 1758 int ret; 1759 1760 np = of_find_matching_node(NULL, omap_iommu_of_match); 1761 if (!np) 1762 return 0; 1763 1764 of_node_put(np); 1765 1766 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, 1767 NULL); 1768 if (!p) 1769 return -ENOMEM; 1770 iopte_cachep = p; 1771 1772 omap_iommu_debugfs_init(); 1773 1774 ret = platform_driver_register(&omap_iommu_driver); 1775 if (ret) { 1776 
pr_err("%s: failed to register driver\n", __func__); 1777 goto fail_driver; 1778 } 1779 1780 ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops); 1781 if (ret) 1782 goto fail_bus; 1783 1784 return 0; 1785 1786 fail_bus: 1787 platform_driver_unregister(&omap_iommu_driver); 1788 fail_driver: 1789 kmem_cache_destroy(iopte_cachep); 1790 return ret; 1791 } 1792 subsys_initcall(omap_iommu_init); 1793 /* must be ready before omap3isp is probed */ 1794