/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)
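/*
 * Page table layout: each address space uses a two-level table. The page
 * directory holds SMMU_NUM_PDE 32-bit entries, each of which either points
 * to a page table of SMMU_NUM_PTE 32-bit entries (SMMU_PDE_NEXT set) or is
 * zero. With 4 KiB pages this covers the full 32-bit IOVA aperture
 * (1024 * 1024 * 4 KiB).
 */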
/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(u32 pde)
{
	/* widen before shifting so PFN bits above bit 19 are not truncated */
	return (dma_addr_t)pde << 12;
}
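/*
 * Cache maintenance helpers: the PTC (page table cache) holds page table
 * entries and the TLB holds completed translations. After a PDE or PTE is
 * modified in memory, the affected PTC line is flushed by address and the
 * TLB is flushed by ASID, by section (4 MiB of IOVA) or by group (16 KiB of
 * IOVA). smmu_flush() reads back SMMU_CONFIG so the preceding register
 * writes are known to have reached the hardware.
 */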
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
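/*
 * An address space is bound to the hardware lazily: the first prepare maps
 * the page directory for DMA, allocates a hardware ASID and programs the
 * page directory base for that ASID via SMMU_PTB_ASID/SMMU_PTB_DATA; later
 * users only bump the use count. The last unprepare releases the ASID and
 * the DMA mapping again.
 */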
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}
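/*
 * Second-level page table handling: as_get_pte() allocates and DMA-maps a
 * page table on demand and hooks it up through a PDE, while
 * tegra_smmu_pte_lookup() only returns an existing entry. as->count[]
 * tracks the number of valid PTEs per page table so the page can be freed
 * once the last entry is gone.
 */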
static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}
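/*
 * Only 4 KiB mappings are supported (see tegra_smmu_ops.pgsize_bitmap), so
 * the IOMMU core splits larger requests and map/unmap always operate on a
 * single PTE.
 */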
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 pte_attrs;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | pte_attrs);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}
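/*
 * Devices reference the SMMU through an "iommus" property whose single cell
 * is the memory client's SWGROUP ID (see tegra_smmu_of_xlate()). A purely
 * illustrative consumer node might look like this (node name, phandle and
 * the TEGRA_SWGROUP_DC value are examples only):
 *
 *	dc@54200000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 */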
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct iommu_group *group;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);
			of_node_put(args.np);

			if (err < 0)
				return err;

			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;

			iommu_device_link(&smmu->iommu, dev);

			break;
		}

		of_node_put(args.np);
		index++;
	}

	if (!smmu)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;

	if (smmu)
		iommu_device_unlink(&smmu->iommu, dev);

	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
						unsigned int swgroup)
{
	const struct tegra_smmu_group_soc *soc;
	struct tegra_smmu_group *group;

	soc = tegra_smmu_find_group(smmu, swgroup);
	if (!soc)
		return NULL;

	mutex_lock(&smmu->lock);

	list_for_each_entry(group, &smmu->groups, list)
		if (group->soc == soc) {
			mutex_unlock(&smmu->lock);
			return group->group;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct iommu_group *group;

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
	if (!group)
		group = generic_device_group(dev);

	return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};
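/*
 * If a Tegra30-compatible AHB bus node is present, let the AHB driver know
 * that the SMMU is available (tegra_ahb_enable_smmu()) so that transactions
 * from AHB bus masters can be translated as well. Without such a node this
 * is a no-op.
 */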
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
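/*
 * tegra_smmu_probe() is not a platform driver probe of its own; it is
 * called by the Tegra memory controller driver, which passes in the SoC
 * data and the struct tegra_mc whose register space also contains the SMMU
 * registers (smmu->regs = mc->regs below).
 */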
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}