/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <mach/sysmmu.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}
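
/*
 * Worked example of the two-level lookup above (illustrative values):
 * for IOVA 0x12345678,
 *
 *	lv1ent_offset(0x12345678) = 0x12345678 >> 20             = 0x123
 *	lv2ent_offset(0x12345678) = (0x12345678 & 0xFF000) >> 12 = 0x45
 *
 * If the Lv1 entry at index 0x123 maps a 1MB section, the physical
 * address is section_phys(sent) + section_offs(0x12345678), i.e. the
 * entry's upper 12 bits plus the IOVA's low 20 bits (0x45678).  If the
 * entry points to an Lv2 table instead, entry 0x45 of that table is
 * consulted and the low 12 bits (0x678) become the small page offset.
 */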

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

/*
 * @itype: type of fault.
 * @pgtable_base: the physical address of page table base. This is 0 if @itype
 *                is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
			unsigned long pgtable_base, unsigned long fault_addr);

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};
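
/*
 * A minimal sketch of a driver-private fault handler matching
 * sysmmu_fault_handler_t; the name "my_fault_handler" is hypothetical,
 * not part of this driver.  Returning 0 tells exynos_sysmmu_irq() that
 * the fault was handled, so it clears the interrupt and unblocks the
 * System MMU; any other return value leaves the fault unhandled:
 *
 *	static int my_fault_handler(enum exynos_sysmmu_inttype itype,
 *			unsigned long pgtable_base, unsigned long fault_addr)
 *	{
 *		pr_err("%s at %#lx (pgtable %#lx)\n",
 *			sysmmu_fault_name[itype], fault_addr, pgtable_base);
 *		return 0;
 *	}
 *
 * Install it with exynos_sysmmu_set_fault_handler(dev, my_fault_handler),
 * defined below.
 */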

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	unsigned long *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
	struct list_head node; /* entry of exynos_iommu_domain.clients */
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *dev;	/* Owner of system MMU */
	char *dbgname;
	int nsfrs;
	void __iomem **sfrbases;
	struct clk *clk[2];
	int activations;
	rwlock_t lock;
	struct iommu_domain *domain;
	sysmmu_fault_handler_t fault_handler;
	unsigned long pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
						unsigned long iova)
{
	__raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				unsigned long pgd)
{
	__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
					unsigned long size, int idx)
{
	__raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
	__raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
}
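
/*
 * Note on the prefetch buffer window arithmetic above: the end register
 * is written with base + size - 1.  __exynos_sysmmu_enable() below passes
 * base == 0 and size == -1 (i.e. 0xFFFFFFFF as an unsigned long), so
 * REG_PBn_EADDR is programmed with 0xFFFFFFFE, making each prefetch
 * buffer cover virtually the entire 32-bit I/O address space.
 */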

static void __set_fault_handler(struct sysmmu_drvdata *data,
					sysmmu_fault_handler_t handler)
{
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);
	data->fault_handler = handler;
	write_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_set_fault_handler(struct device *dev,
					sysmmu_fault_handler_t handler)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	__set_fault_handler(data, handler);
}

static int default_fault_handler(enum exynos_sysmmu_inttype itype,
		unsigned long pgtable_base, unsigned long fault_addr)
{
	unsigned long *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at 0x%lx (Page table base: 0x%lx)\n",
			sysmmu_fault_name[itype], fault_addr, pgtable_base);

	ent = section_entry(__va(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: 0x%lx\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
	}

	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

	BUG();

	return 0;
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt occurs */
	struct sysmmu_drvdata *data = dev_id;
	struct resource *irqres;
	struct platform_device *pdev;
	enum exynos_sysmmu_inttype itype;
	unsigned long addr = -1;

	int i, ret = -ENOSYS;

	read_lock(&data->lock);

	WARN_ON(!is_sysmmu_active(data));

	pdev = to_platform_device(data->sysmmu);
	for (i = 0; i < (pdev->num_resources / 2); i++) {
		irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (irqres && ((int)irqres->start == irq))
			break;
	}

	if (i == (pdev->num_resources / 2)) { /* no matching IRQ resource */
		itype = SYSMMU_FAULT_UNKNOWN;
	} else {
		itype = (enum exynos_sysmmu_inttype)
			__ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
		if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
			itype = SYSMMU_FAULT_UNKNOWN;
		else
			addr = __raw_readl(
				data->sfrbases[i] + fault_reg_offset[itype]);
	}

	if (data->domain)
		ret = report_iommu_fault(data->domain, data->dev,
				addr, itype);

	if ((ret == -ENOSYS) && data->fault_handler) {
		unsigned long base = data->pgtable;
		if (itype != SYSMMU_FAULT_UNKNOWN)
			base = __raw_readl(
					data->sfrbases[i] + REG_PT_BASE_ADDR);
		ret = data->fault_handler(itype, base, addr);
	}

	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
		__raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
	else
		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
				data->dbgname, sysmmu_fault_name[itype]);

	if (itype != SYSMMU_FAULT_UNKNOWN)
		sysmmu_unblock(data->sfrbases[i]);

	read_unlock(&data->lock);

	return IRQ_HANDLED;
}
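
/*
 * Fault reporting precedence in exynos_sysmmu_irq() above: if the System
 * MMU is attached to an iommu_domain, the fault is first offered to the
 * generic IOMMU layer via report_iommu_fault().  Only when that returns
 * -ENOSYS (no fault handler registered for the domain) does the
 * driver-private handler installed by exynos_sysmmu_set_fault_handler()
 * get called.
 */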

static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;
	bool disabled = false;
	int i;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_inactive(data))
		goto finish;

	for (i = 0; i < data->nsfrs; i++)
		__raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);

	if (data->clk[1])
		clk_disable(data->clk[1]);
	if (data->clk[0])
		clk_disable(data->clk[0]);

	disabled = true;
	data->pgtable = 0;
	data->domain = NULL;
finish:
	write_unlock_irqrestore(&data->lock, flags);

	if (disabled)
		dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
	else
		dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
					data->dbgname, data->activations);

	return disabled;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns a negative error value if an error occurred and the System MMU was
 * not enabled, 0 if the System MMU has just been enabled, and 1 if the System
 * MMU was already enabled.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
			unsigned long pgtable, struct iommu_domain *domain)
{
	int i, ret = 0;
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_active(data)) {
		if (WARN_ON(pgtable != data->pgtable)) {
			ret = -EBUSY;
			set_sysmmu_inactive(data);
		} else {
			ret = 1;
		}

		dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
		goto finish;
	}

	if (data->clk[0])
		clk_enable(data->clk[0]);
	if (data->clk[1])
		clk_enable(data->clk[1]);

	data->pgtable = pgtable;

	for (i = 0; i < data->nsfrs; i++) {
		__sysmmu_set_ptbase(data->sfrbases[i], pgtable);

		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
			/* System MMU version is 3.x */
			__raw_writel((1 << 12) | (2 << 28),
					data->sfrbases[i] + REG_MMU_CFG);
			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
		}

		__raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
	}

	data->domain = domain;

	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
	write_unlock_irqrestore(&data->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	int ret;

	BUG_ON(!memblock_is_memory(pgtable));

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0) {
		dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
		return ret;
	}

	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
	if (WARN_ON(ret < 0)) {
		pm_runtime_put(data->sysmmu);
		dev_err(data->sysmmu,
			"(%s) Already enabled with page table %#lx\n",
			data->dbgname, data->pgtable);
	} else {
		data->dev = dev;
	}

	return ret;
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	bool disabled;

	disabled = __exynos_sysmmu_disable(data);
	pm_runtime_put(data->sysmmu);

	return disabled;
}
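
/*
 * The enable/disable pair above is reference counted through
 * data->activations, so calls may nest: exynos_sysmmu_enable() returns 0
 * when this call actually turned the System MMU on, 1 when it was already
 * on with the same page table, and a negative errno otherwise, while
 * exynos_sysmmu_disable() returns true only when the activation count
 * drops to zero and the hardware is really turned off.  A hypothetical
 * caller (illustrative only):
 *
 *	if (exynos_sysmmu_enable(dev, __pa(pgtable)) < 0)
 *		goto err;
 *	... issue DMA through the System MMU ...
 *	exynos_sysmmu_disable(dev);
 */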

static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		int i;
		for (i = 0; i < data->nsfrs; i++) {
			if (sysmmu_block(data->sfrbases[i])) {
				__sysmmu_tlb_invalidate_entry(
						data->sfrbases[i], iova);
				sysmmu_unblock(data->sfrbases[i]);
			}
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping TLB invalidation.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		int i;
		for (i = 0; i < data->nsfrs; i++) {
			if (sysmmu_block(data->sfrbases[i])) {
				__sysmmu_tlb_invalidate(data->sfrbases[i]);
				sysmmu_unblock(data->sfrbases[i]);
			}
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping TLB invalidation.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int i, ret;
	struct device *dev;
	struct sysmmu_drvdata *data;

	dev = &pdev->dev;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_dbg(dev, "Not enough memory\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	dev_set_drvdata(dev, data);
	data->nsfrs = pdev->num_resources / 2;
	data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
								GFP_KERNEL);
	if (data->sfrbases == NULL) {
		dev_dbg(dev, "Not enough memory\n");
		ret = -ENOMEM;
		goto err_init;
	}

	for (i = 0; i < data->nsfrs; i++) {
		struct resource *res;
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			dev_dbg(dev, "Unable to find IOMEM region\n");
			ret = -ENOENT;
			data->nsfrs = i; /* unmap only what was mapped */
			goto err_res;
		}

		data->sfrbases[i] = ioremap(res->start, resource_size(res));
		if (!data->sfrbases[i]) {
			dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
							res->start);
			ret = -ENOENT;
			data->nsfrs = i; /* unmap only what was mapped */
			goto err_res;
		}
	}

	for (i = 0; i < data->nsfrs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			dev_dbg(dev, "Unable to find IRQ resource\n");
			goto err_irq;
		}

		ret = request_irq(ret, exynos_sysmmu_irq, 0,
					dev_name(dev), data);
		if (ret) {
			dev_dbg(dev, "Unable to register interrupt handler\n");
			goto err_irq;
		}
	}

	if (dev_get_platdata(dev)) {
		char *deli, *beg;
		struct sysmmu_platform_data *platdata = dev_get_platdata(dev);

		beg = platdata->clockname;

		for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
			/* NOTHING */;

		if (*deli == '\0')
			deli = NULL;
		else
			*deli = '\0';

		data->clk[0] = clk_get(dev, beg);
		if (IS_ERR(data->clk[0])) {
			data->clk[0] = NULL;
			dev_dbg(dev, "No clock descriptor registered\n");
		}

		if (data->clk[0] && deli) {
			*deli = ',';
			data->clk[1] = clk_get(dev, deli + 1);
			if (IS_ERR(data->clk[1]))
				data->clk[1] = NULL;
		}

		data->dbgname = platdata->dbgname;
	}

	data->sysmmu = dev;
	rwlock_init(&data->lock);
	INIT_LIST_HEAD(&data->node);

	__set_fault_handler(data, &default_fault_handler);

	if (dev->parent)
		pm_runtime_enable(dev);

	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
	return 0;
err_irq:
	while (i-- > 0) {
		int irq;

		irq = platform_get_irq(pdev, i);
		free_irq(irq, data);
	}
err_res:
	while (data->nsfrs-- > 0)
		iounmap(data->sfrbases[data->nsfrs]);
	kfree(data->sfrbases);
err_init:
	kfree(data);
err_alloc:
	dev_err(dev, "Failed to initialize\n");
	return ret;
}
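
/*
 * The platform data parsing in exynos_sysmmu_probe() above splits
 * platdata->clockname at the first comma: with a (hypothetical) value
 * "sysmmu,master", clk[0] is looked up as "sysmmu" and clk[1] as
 * "master"; with plain "sysmmu", only clk[0] is acquired and clk[1]
 * stays NULL.
 */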

static struct platform_driver exynos_sysmmu_driver = {
	.probe		= exynos_sysmmu_probe,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= "exynos-sysmmu",
	}
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (unsigned long *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, node) {
		while (!exynos_sysmmu_disable(data->dev))
			; /* until System MMU is actually disabled */
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kfree(__va(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long flags;
	int ret;

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0)
		return ret;

	ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);

	if (ret == 0) {
		/* 'data->node' must not already be in priv->clients */
		BUG_ON(!list_empty(&data->node));
		data->dev = dev;
		list_add_tail(&data->node, &priv->clients);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
				__func__, __pa(priv->pgtable));
		pm_runtime_put(data->sysmmu);
	} else if (ret > 0) {
		dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
					__func__, __pa(priv->pgtable));
	} else {
		dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
					__func__, __pa(priv->pgtable));
	}

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	struct list_head *pos;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each(pos, &priv->clients) {
		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
			found = true;
			break;
		}
	}

	if (!found)
		goto finish;

	if (__exynos_sysmmu_disable(data)) {
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
					__func__, __pa(priv->pgtable));
		list_del_init(&data->node);

	} else {
		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed\n",
					__func__, __pa(priv->pgtable));
	}

finish:
	spin_unlock_irqrestore(&priv->lock, flags);

	if (found)
		pm_runtime_put(data->sysmmu);
}

static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
					short *pgcounter)
{
	if (lv1ent_fault(sent)) {
		unsigned long *pent;

		pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return NULL;

		*sent = mk_lv1ent_page(__pa(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);
	}

	return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent))
		return -EADDRINUSE;

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES)
			return -EADDRINUSE;

		kfree(page_entry(sent, 0));

		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	return 0;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (!lv2ent_fault(pent))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (!lv2ent_fault(pent)) {
				/* roll back the entries already written */
				memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(entry, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		unsigned long *pent;

		pent = alloc_lv2entry(entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (!pent)
			ret = -ENOMEM;
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret) {
		pr_debug("%s: Failed to map iova %#lx/%#zx bytes\n",
							__func__, iova, size);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}
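
/*
 * exynos_iommu_map() above only ever sees sizes of SECT_SIZE (1MB),
 * LPAGE_SIZE (64KB) or SPAGE_SIZE (4KB), because pgsize_bitmap below
 * advertises exactly those three.  The generic iommu_map() splits any
 * larger request accordingly; e.g. an aligned 0x111000-byte (1MB + 64KB
 * + 4KB) mapping arrives here as three calls, and the 64KB one makes
 * lv2set_page() write SPAGES_PER_LPAGE (16) identical large-page entries
 * into the Lv2 table.
 */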

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	unsigned long *ent;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		BUG_ON(size < SECT_SIZE);

		*ent = 0;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	BUG_ON(size < LPAGE_SIZE);

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(data, &priv->clients, node)
		sysmmu_tlb_invalidate_entry(data->dev, iova);
	spin_unlock_irqrestore(&priv->lock, flags);

	return size;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_init = exynos_iommu_domain_init,
	.domain_destroy = exynos_iommu_domain_destroy,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&exynos_sysmmu_driver);

	if (ret == 0)
		bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);

	return ret;
}
subsys_initcall(exynos_iommu_init);
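
/*
 * A minimal usage sketch through the generic IOMMU API this driver plugs
 * into (the device and addresses are hypothetical):
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	if (!domain || iommu_attach_device(domain, dev))
 *		goto err;
 *	iommu_map(domain, 0x20000000, phys, SZ_1M, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, 0x20000000, SZ_1M);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */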