/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	\
	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom:	generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

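/*
 * dra7_cfg_dspsys_mmu - toggle the MMU enable bit in the DSP subsystem's
 * system control block: on DRA7, the MMUs inside a DSP subsystem are
 * additionally gated by the DSP_SYS_MMU_CONFIG register, with obj->id
 * selecting the enable bit for this instance. This is a no-op for IOMMUs
 * that have no syscfg regmap.
 */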
static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (pdata && pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = omap2_iommu_enable(obj);

	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	omap2_iommu_disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}

/*
 * TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

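/*
 * get_iopte_attr - build the attribute bits of a page table entry from an
 * iotlb_entry: the mixed/endian/element-size attributes are used in place
 * for 4K/64K (second-level) entries and shifted up by 6 bits for 1M/16M
 * (first-level) descriptors.
 */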
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

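/*
 * When PREFETCH_IOTLB is defined, new mappings are also pre-loaded into the
 * hardware TLB by load_iotlb_entry(); otherwise the stub below does nothing
 * and entries are fetched by the hardware table walker on first access.
 */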
#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else	/* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif	/* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
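/*
 * The page table is a two-level structure: first-level (iopgd) entries
 * either map a 1MB section or 16MB supersection directly, or point to a
 * second-level (iopte) table of 4KB small and 64KB large pages.
 * Supersections and large pages are written as 16 duplicated entries.
 * Every update is pushed out to memory with
 * dma_sync_single_range_for_device(), since the MMU walks the tables
 * directly from RAM.
 */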
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}

static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* an L2 table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = virt_to_phys(iopte);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = virt_to_phys(iopte);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * walk the table to check whether it is now empty and
		 * can be freed
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
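/*
 * iommu_fault_handler - MMU fault ISR: read and acknowledge the fault
 * status, let the domain's fault handler (via report_iommu_fault()) try to
 * resolve it, and if nobody does, mask further MMU interrupts and dump the
 * offending first/second-level entries.
 */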
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->iommu_dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj:	target omap iommu device
 * @iopgd:	page table
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	iommu_disable(obj);
	obj->pd_dma = 0;
	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

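/*
 * omap_iommu_dra7_get_dsp_system_cfg - DRA7 DSP MMUs sit behind a system
 * control register; the "ti,syscon-mmuconfig" property supplies the syscon
 * regmap plus the MMU instance index (0 or 1) used by dra7_cfg_dspsys_mmu().
 */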
static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	obj->group = iommu_group_alloc();
	if (IS_ERR(obj->group))
		return PTR_ERR(obj->group);

	err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name);
	if (err)
		goto out_group;

	iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);

	err = iommu_device_register(&obj->iommu);
	if (err)
		goto out_sysfs;

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	iommu_group_put(obj->group);
	obj->group = NULL;

	iommu_device_sysfs_remove(&obj->iommu);
	iommu_device_unregister(&obj->iommu);

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}

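/*
 * map/unmap: the driver handles exactly one hardware page (4K, 64K, 1M or
 * 16M, as advertised in OMAP_IOMMU_PGSIZES) per call; the IOMMU core splits
 * larger requests into these sizes.
 */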
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *oiommu;
	int ret = 0;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	oiommu = arch_data->iommu_dev;

	/* get a handle to and enable the omap iommu */
	ret = omap_iommu_attach(oiommu, omap_domain->pgtable);
	if (ret) {
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;
	omap_domain->dev = NULL;
	oiommu->domain = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

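/*
 * Each unmanaged domain owns its own first-level table (IOPGD_TABLE_SIZE,
 * i.e. 16KB); omap2_iommu_enable() rejects tables that are not 16KB
 * aligned, hence the alignment check below.
 */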
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		goto out;

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable)
		goto fail_nomem;

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
		goto fail_align;

	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;

fail_align:
	kfree(omap_domain->pgtable);
fail_nomem:
	kfree(omap_domain);
out:
	return NULL;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * Is an iommu device still attached?
	 * (currently, only one device can be attached)
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}

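/*
 * omap_iommu_add_device - resolve the first "iommus" phandle of a client
 * device to the corresponding omap_iommu instance and cache it in
 * dev->archdata.iommu so that attach_dev can find it later.
 */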
static int omap_iommu_add_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data;
	struct omap_iommu *oiommu;
	struct iommu_group *group;
	struct device_node *np;
	struct platform_device *pdev;
	int ret;

	/*
	 * Allocate the archdata iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return 0;

	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np)
		return 0;

	pdev = of_find_device_by_node(np);
	if (WARN_ON(!pdev)) {
		of_node_put(np);
		return -EINVAL;
	}

	oiommu = platform_get_drvdata(pdev);
	if (!oiommu) {
		of_node_put(np);
		return -EINVAL;
	}

	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data) {
		of_node_put(np);
		return -ENOMEM;
	}

	ret = iommu_device_link(&oiommu->iommu, dev);
	if (ret) {
		kfree(arch_data);
		of_node_put(np);
		return ret;
	}

	arch_data->iommu_dev = oiommu;
	dev->archdata.iommu = arch_data;

	/*
	 * IOMMU group initialization calls into omap_iommu_device_group, which
	 * needs a valid dev->archdata.iommu pointer
	 */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		iommu_device_unlink(&oiommu->iommu, dev);
		dev->archdata.iommu = NULL;
		kfree(arch_data);
		return PTR_ERR(group);
	}
	iommu_group_put(group);

	of_node_put(np);

	return 0;
}

static void omap_iommu_remove_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	if (!dev->of_node || !arch_data)
		return;

	iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
	iommu_group_remove_device(dev);

	dev->archdata.iommu = NULL;
	kfree(arch_data);
}

static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct iommu_group *group = ERR_PTR(-EINVAL);

	if (arch_data->iommu_dev)
		group = arch_data->iommu_dev->group;

	return group;
}

static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.add_device	= omap_iommu_add_device,
	.remove_device	= omap_iommu_remove_device,
	.device_group	= omap_iommu_device_group,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      NULL);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
	if (ret)
		goto fail_bus;

	return 0;

fail_bus:
	platform_driver_unregister(&omap_iommu_driver);
fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */