/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	\
	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev: client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev: client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

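/*
 * dra7_cfg_dspsys_mmu - enable/disable a DRA7 DSP MMU at the subsystem level
 * @obj: target iommu
 * @enable: when true, set the MMU enable bit in the DSP_SYS MMU_CONFIG register
 *
 * DRA7 DSP IOMMUs carry an extra enable bit in the DSP subsystem syscon
 * region (selected by obj->id); instances without a syscfg regmap treat
 * this as a no-op.
 */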
static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (pdata && pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = omap2_iommu_enable(obj);

	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	omap2_iommu_disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}

/*
 * TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

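/*
 * get_iopte_attr - pack the mixed/endian/element-size fields of an
 * iotlb_entry into page table entry attribute bits. Small and large
 * (4K/64K) page entries keep the attributes in the low bits, while
 * section and supersection entries get them shifted up by 6 bits.
 */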
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

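/*
 * TLB entry preloading is compiled in only when PREFETCH_IOTLB is defined;
 * otherwise load_iotlb_entry() below is a stub and translations are loaded
 * on demand by the hardware table walker.
 */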
#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else	/* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif	/* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte)
		kmem_cache_free(iopte_cachep, iopte);
}

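/*
 * iopte_alloc - return the L2 table for @da under @iopgd, allocating it if
 * needed. The allocation itself is done with obj->page_table_lock dropped,
 * so a racing allocation is possible; the loser frees its table and reuses
 * the one installed by the winner.
 */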
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* an L2 table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

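/**
 * iopgtable_clear_entry_core - clear the page table entry mapping @da
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Returns the number of bytes unmapped (0 if nothing was mapped at @da).
 * Large pages and supersections are rewound to their first entry so all
 * 16 duplicated entries are cleared, and an emptied L2 table is freed.
 * Caller must hold obj->page_table_lock.
 **/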
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->iommu_dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj: target omap iommu device
 * @iopgd: page table
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

err_enable:
	spin_unlock(&obj->iommu_lock);

	return err;
}

/**
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	iommu_disable(obj);
	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

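/*
 * omap_iommu_dra7_get_dsp_system_cfg - parse the "ti,syscon-mmuconfig"
 * property of DRA7 DSP IOMMUs to get the DSP_SYS syscon regmap and the
 * MMU instance id within the subsystem. Other IOMMU instances return 0
 * without touching @obj.
 */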
static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * OMAP Device MMU (IOMMU) detection
 */
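/*
 * omap_iommu_probe - probe an OMAP IOMMU instance (DT-only): map its
 * registers, read the TLB size and optional quirks from the device tree,
 * install the fault handler, and register the instance with the IOMMU
 * core before enabling runtime PM.
 */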
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	obj->group = iommu_group_alloc();
	if (IS_ERR(obj->group))
		return PTR_ERR(obj->group);

	err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name);
	if (err)
		goto out_group;

	iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);

	err = iommu_device_register(&obj->iommu);
	if (err)
		goto out_sysfs;

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	iommu_group_put(obj->group);
	obj->group = NULL;

	iommu_device_sysfs_remove(&obj->iommu);
	iommu_device_unregister(&obj->iommu);

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe = omap_iommu_probe,
	.remove = omap_iommu_remove,
	.driver = {
		.name = "omap-iommu",
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da = da;
	e->pa = pa;
	e->valid = MMU_CAM_V;
	e->pgsz = pgsz;
	e->endian = MMU_RAM_ENDIAN_LITTLE;
	e->elsz = MMU_RAM_ELSZ_8;
	e->mixed = 0;

	return iopgsz_to_bytes(e->pgsz);
}

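/*
 * omap_iommu_map - .map callback of omap_iommu_ops. @bytes must be one of
 * the supported page sizes (4K, 64K, 1M or 16M); the size is converted to
 * an OMAP page size code, packed into an iotlb_entry and written into the
 * page table (and, when TLB preloading is enabled, into the TLB as well).
 */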
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *oiommu;
	int ret = 0;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	oiommu = arch_data->iommu_dev;

	/* get a handle to and enable the omap iommu */
	ret = omap_iommu_attach(oiommu, omap_domain->pgtable);
	if (ret) {
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;
	omap_domain->dev = NULL;
	oiommu->domain = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

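/*
 * omap_iommu_domain_alloc - only unmanaged domains are supported. The L1
 * page table must be sufficiently aligned for the MMU_TTB register (the
 * enable path checks for SZ_16K alignment); kzalloc() of IOPGD_TABLE_SIZE
 * gives that in practice, and the WARN_ON below is a safety net.
 */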
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		goto out;

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable)
		goto fail_nomem;

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
		goto fail_align;

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;

fail_align:
	kfree(omap_domain->pgtable);
fail_nomem:
	kfree(omap_domain);
out:
	return NULL;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * An iommu device may still be attached
	 * (currently, only one device can be attached).
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

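/*
 * omap_iommu_iova_to_phys - walk the two-level page table in software to
 * translate @da: an L2 entry is used when the L1 entry points to a table,
 * otherwise the L1 section/supersection entry is translated directly.
 */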
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}

static int omap_iommu_add_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data;
	struct omap_iommu *oiommu;
	struct iommu_group *group;
	struct device_node *np;
	struct platform_device *pdev;
	int ret;

	/*
	 * Allocate the archdata iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return 0;

	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np)
		return 0;

	pdev = of_find_device_by_node(np);
	if (WARN_ON(!pdev)) {
		of_node_put(np);
		return -EINVAL;
	}

	oiommu = platform_get_drvdata(pdev);
	if (!oiommu) {
		of_node_put(np);
		return -EINVAL;
	}

	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data) {
		of_node_put(np);
		return -ENOMEM;
	}

	ret = iommu_device_link(&oiommu->iommu, dev);
	if (ret) {
		kfree(arch_data);
		of_node_put(np);
		return ret;
	}

	arch_data->iommu_dev = oiommu;
	dev->archdata.iommu = arch_data;

	/*
	 * IOMMU group initialization calls into omap_iommu_device_group, which
	 * needs a valid dev->archdata.iommu pointer
	 */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		iommu_device_unlink(&oiommu->iommu, dev);
		dev->archdata.iommu = NULL;
		kfree(arch_data);
		return PTR_ERR(group);
	}
	iommu_group_put(group);

	of_node_put(np);

	return 0;
}

static void omap_iommu_remove_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	if (!dev->of_node || !arch_data)
		return;

	iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
	iommu_group_remove_device(dev);

	dev->archdata.iommu = NULL;
	kfree(arch_data);
}

static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct iommu_group *group = ERR_PTR(-EINVAL);

	if (arch_data->iommu_dev)
		group = arch_data->iommu_dev->group;

	return group;
}

static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc = omap_iommu_domain_alloc,
	.domain_free = omap_iommu_domain_free,
	.attach_dev = omap_iommu_attach_dev,
	.detach_dev = omap_iommu_detach_dev,
	.map = omap_iommu_map,
	.unmap = omap_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = omap_iommu_iova_to_phys,
	.add_device = omap_iommu_add_device,
	.remove_device = omap_iommu_remove_device,
	.device_group = omap_iommu_device_group,
	.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
	if (ret)
		goto fail_bus;

	return 0;

fail_bus:
	platform_driver_unregister(&omap_iommu_driver);
fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */