/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
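/*
 * Each address space uses a two-level page table: a single 4 KiB page
 * directory whose 1024 entries each cover a 4 MiB section of the 32-bit
 * IOVA space, and 4 KiB page tables whose 1024 entries each map one
 * 4 KiB page.
 */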
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(u32 pde)
{
	return pde << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}
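
/*
 * Each active address space is assigned one of the SoC's hardware ASIDs;
 * allocations are tracked in the smmu->asids bitmap under smmu->lock.
 */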
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
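
/*
 * tegra_smmu_as_prepare() binds an address space to the hardware the first
 * time a device is attached to it: the page directory is mapped for DMA,
 * an ASID is allocated and the directory is installed as that ASID's page
 * table base. Further attaches only bump the use count;
 * tegra_smmu_as_unprepare() reverses this when the last user detaches.
 */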
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}
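
/*
 * tegra_smmu_pte_lookup() returns the PTE for an IOVA only if the
 * corresponding page table already exists; as_get_pte() below also
 * allocates and installs a new page table on demand.
 */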
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}
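
/*
 * as->count[] tracks the number of live PTEs in each page table so that
 * a page table can be unmapped and freed once it is empty again.
 */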
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}
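
/*
 * Walk the device's "iommus" property and associate the device with this
 * SMMU if one of the phandles refers to it.
 */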
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct iommu_group *group;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);
			of_node_put(args.np);

			if (err < 0)
				return err;

			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;

			iommu_device_link(&smmu->iommu, dev);

			break;
		}

		of_node_put(args.np);
		index++;
	}

	if (!smmu)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;

	if (smmu)
		iommu_device_unlink(&smmu->iommu, dev);

	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
						unsigned int swgroup)
{
	const struct tegra_smmu_group_soc *soc;
	struct tegra_smmu_group *group;

	soc = tegra_smmu_find_group(smmu, swgroup);
	if (!soc)
		return NULL;

	mutex_lock(&smmu->lock);

	list_for_each_entry(group, &smmu->groups, list)
		if (group->soc == soc) {
			mutex_unlock(&smmu->lock);
			return group->group;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct iommu_group *group;

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
	if (!group)
		group = generic_device_group(dev);

	return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}
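
/*
 * debugfs: "swgroups" lists each SWGROUP's translation enable bit and
 * ASID, "clients" lists the per-client SMMU enable bits.
 */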
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
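
/*
 * Note that this is not a regular platform driver probe: it is called by
 * the Tegra memory controller driver, which owns the register space and
 * supplies the per-SoC configuration.
 */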
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}