1 /* 2 * IOMMU API for GART in Tegra20 3 * 4 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along with 16 * this program; if not, write to the Free Software Foundation, Inc., 17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 */ 19 20 #define pr_fmt(fmt) "%s(): " fmt, __func__ 21 22 #include <linux/module.h> 23 #include <linux/platform_device.h> 24 #include <linux/spinlock.h> 25 #include <linux/slab.h> 26 #include <linux/vmalloc.h> 27 #include <linux/mm.h> 28 #include <linux/list.h> 29 #include <linux/device.h> 30 #include <linux/io.h> 31 #include <linux/iommu.h> 32 #include <linux/of.h> 33 34 #include <asm/cacheflush.h> 35 36 /* bitmap of the page sizes currently supported */ 37 #define GART_IOMMU_PGSIZES (SZ_4K) 38 39 #define GART_REG_BASE 0x24 40 #define GART_CONFIG (0x24 - GART_REG_BASE) 41 #define GART_ENTRY_ADDR (0x28 - GART_REG_BASE) 42 #define GART_ENTRY_DATA (0x2c - GART_REG_BASE) 43 #define GART_ENTRY_PHYS_ADDR_VALID (1 << 31) 44 45 #define GART_PAGE_SHIFT 12 46 #define GART_PAGE_SIZE (1 << GART_PAGE_SHIFT) 47 #define GART_PAGE_MASK \ 48 (~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID) 49 50 struct gart_client { 51 struct device *dev; 52 struct list_head list; 53 }; 54 55 struct gart_device { 56 void __iomem *regs; 57 u32 *savedata; 58 u32 page_count; /* total remappable size */ 59 dma_addr_t iovmm_base; /* offset to vmm_area */ 60 spinlock_t pte_lock; /* for pagetable */ 61 struct list_head client; 62 spinlock_t client_lock; /* for client list */ 63 struct device *dev; 64 65 struct iommu_device iommu; /* IOMMU Core handle */ 66 }; 67 68 struct gart_domain { 69 struct iommu_domain domain; /* generic domain handle */ 70 struct gart_device *gart; /* link to gart device */ 71 }; 72 73 static struct gart_device *gart_handle; /* unique for a system */ 74 75 #define GART_PTE(_pfn) \ 76 (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT)) 77 78 static struct gart_domain *to_gart_domain(struct iommu_domain *dom) 79 { 80 return container_of(dom, struct gart_domain, domain); 81 } 82 83 /* 84 * Any interaction between any block on PPSB and a block on APB or AHB 85 * must have these read-back to ensure the APB/AHB bus transaction is 86 * complete before initiating activity on the PPSB block. 87 */ 88 #define FLUSH_GART_REGS(gart) ((void)readl((gart)->regs + GART_CONFIG)) 89 90 #define for_each_gart_pte(gart, iova) \ 91 for (iova = gart->iovmm_base; \ 92 iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \ 93 iova += GART_PAGE_SIZE) 94 95 static inline void gart_set_pte(struct gart_device *gart, 96 unsigned long offs, u32 pte) 97 { 98 writel(offs, gart->regs + GART_ENTRY_ADDR); 99 writel(pte, gart->regs + GART_ENTRY_DATA); 100 101 dev_dbg(gart->dev, "%s %08lx:%08x\n", 102 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK); 103 } 104 105 static inline unsigned long gart_read_pte(struct gart_device *gart, 106 unsigned long offs) 107 { 108 unsigned long pte; 109 110 writel(offs, gart->regs + GART_ENTRY_ADDR); 111 pte = readl(gart->regs + GART_ENTRY_DATA); 112 113 return pte; 114 } 115 116 static void do_gart_setup(struct gart_device *gart, const u32 *data) 117 { 118 unsigned long iova; 119 120 for_each_gart_pte(gart, iova) 121 gart_set_pte(gart, iova, data ? *(data++) : 0); 122 123 writel(1, gart->regs + GART_CONFIG); 124 FLUSH_GART_REGS(gart); 125 } 126 127 #ifdef DEBUG 128 static void gart_dump_table(struct gart_device *gart) 129 { 130 unsigned long iova; 131 unsigned long flags; 132 133 spin_lock_irqsave(&gart->pte_lock, flags); 134 for_each_gart_pte(gart, iova) { 135 unsigned long pte; 136 137 pte = gart_read_pte(gart, iova); 138 139 dev_dbg(gart->dev, "%s %08lx:%08lx\n", 140 (GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ", 141 iova, pte & GART_PAGE_MASK); 142 } 143 spin_unlock_irqrestore(&gart->pte_lock, flags); 144 } 145 #else 146 static inline void gart_dump_table(struct gart_device *gart) 147 { 148 } 149 #endif 150 151 static inline bool gart_iova_range_valid(struct gart_device *gart, 152 unsigned long iova, size_t bytes) 153 { 154 unsigned long iova_start, iova_end, gart_start, gart_end; 155 156 iova_start = iova; 157 iova_end = iova_start + bytes - 1; 158 gart_start = gart->iovmm_base; 159 gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1; 160 161 if (iova_start < gart_start) 162 return false; 163 if (iova_end > gart_end) 164 return false; 165 return true; 166 } 167 168 static int gart_iommu_attach_dev(struct iommu_domain *domain, 169 struct device *dev) 170 { 171 struct gart_domain *gart_domain = to_gart_domain(domain); 172 struct gart_device *gart = gart_domain->gart; 173 struct gart_client *client, *c; 174 int err = 0; 175 176 client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL); 177 if (!client) 178 return -ENOMEM; 179 client->dev = dev; 180 181 spin_lock(&gart->client_lock); 182 list_for_each_entry(c, &gart->client, list) { 183 if (c->dev == dev) { 184 dev_err(gart->dev, 185 "%s is already attached\n", dev_name(dev)); 186 err = -EINVAL; 187 goto fail; 188 } 189 } 190 list_add(&client->list, &gart->client); 191 spin_unlock(&gart->client_lock); 192 dev_dbg(gart->dev, "Attached %s\n", dev_name(dev)); 193 return 0; 194 195 fail: 196 devm_kfree(gart->dev, client); 197 spin_unlock(&gart->client_lock); 198 return err; 199 } 200 201 static void gart_iommu_detach_dev(struct iommu_domain *domain, 202 struct device *dev) 203 { 204 struct gart_domain *gart_domain = to_gart_domain(domain); 205 struct gart_device *gart = gart_domain->gart; 206 struct gart_client *c; 207 208 spin_lock(&gart->client_lock); 209 210 list_for_each_entry(c, &gart->client, list) { 211 if (c->dev == dev) { 212 list_del(&c->list); 213 devm_kfree(gart->dev, c); 214 dev_dbg(gart->dev, "Detached %s\n", dev_name(dev)); 215 goto out; 216 } 217 } 218 dev_err(gart->dev, "Couldn't find\n"); 219 out: 220 spin_unlock(&gart->client_lock); 221 } 222 223 static struct iommu_domain *gart_iommu_domain_alloc(unsigned type) 224 { 225 struct gart_domain *gart_domain; 226 struct gart_device *gart; 227 228 if (type != IOMMU_DOMAIN_UNMANAGED) 229 return NULL; 230 231 gart = gart_handle; 232 if (!gart) 233 return NULL; 234 235 gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL); 236 if (!gart_domain) 237 return NULL; 238 239 gart_domain->gart = gart; 240 gart_domain->domain.geometry.aperture_start = gart->iovmm_base; 241 gart_domain->domain.geometry.aperture_end = gart->iovmm_base + 242 gart->page_count * GART_PAGE_SIZE - 1; 243 gart_domain->domain.geometry.force_aperture = true; 244 245 return &gart_domain->domain; 246 } 247 248 static void gart_iommu_domain_free(struct iommu_domain *domain) 249 { 250 struct gart_domain *gart_domain = to_gart_domain(domain); 251 struct gart_device *gart = gart_domain->gart; 252 253 if (gart) { 254 spin_lock(&gart->client_lock); 255 if (!list_empty(&gart->client)) { 256 struct gart_client *c; 257 258 list_for_each_entry(c, &gart->client, list) 259 gart_iommu_detach_dev(domain, c->dev); 260 } 261 spin_unlock(&gart->client_lock); 262 } 263 264 kfree(gart_domain); 265 } 266 267 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, 268 phys_addr_t pa, size_t bytes, int prot) 269 { 270 struct gart_domain *gart_domain = to_gart_domain(domain); 271 struct gart_device *gart = gart_domain->gart; 272 unsigned long flags; 273 unsigned long pfn; 274 275 if (!gart_iova_range_valid(gart, iova, bytes)) 276 return -EINVAL; 277 278 spin_lock_irqsave(&gart->pte_lock, flags); 279 pfn = __phys_to_pfn(pa); 280 if (!pfn_valid(pfn)) { 281 dev_err(gart->dev, "Invalid page: %pa\n", &pa); 282 spin_unlock_irqrestore(&gart->pte_lock, flags); 283 return -EINVAL; 284 } 285 gart_set_pte(gart, iova, GART_PTE(pfn)); 286 FLUSH_GART_REGS(gart); 287 spin_unlock_irqrestore(&gart->pte_lock, flags); 288 return 0; 289 } 290 291 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, 292 size_t bytes) 293 { 294 struct gart_domain *gart_domain = to_gart_domain(domain); 295 struct gart_device *gart = gart_domain->gart; 296 unsigned long flags; 297 298 if (!gart_iova_range_valid(gart, iova, bytes)) 299 return 0; 300 301 spin_lock_irqsave(&gart->pte_lock, flags); 302 gart_set_pte(gart, iova, 0); 303 FLUSH_GART_REGS(gart); 304 spin_unlock_irqrestore(&gart->pte_lock, flags); 305 return 0; 306 } 307 308 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, 309 dma_addr_t iova) 310 { 311 struct gart_domain *gart_domain = to_gart_domain(domain); 312 struct gart_device *gart = gart_domain->gart; 313 unsigned long pte; 314 phys_addr_t pa; 315 unsigned long flags; 316 317 if (!gart_iova_range_valid(gart, iova, 0)) 318 return -EINVAL; 319 320 spin_lock_irqsave(&gart->pte_lock, flags); 321 pte = gart_read_pte(gart, iova); 322 spin_unlock_irqrestore(&gart->pte_lock, flags); 323 324 pa = (pte & GART_PAGE_MASK); 325 if (!pfn_valid(__phys_to_pfn(pa))) { 326 dev_err(gart->dev, "No entry for %08llx:%pa\n", 327 (unsigned long long)iova, &pa); 328 gart_dump_table(gart); 329 return -EINVAL; 330 } 331 return pa; 332 } 333 334 static bool gart_iommu_capable(enum iommu_cap cap) 335 { 336 return false; 337 } 338 339 static int gart_iommu_add_device(struct device *dev) 340 { 341 struct iommu_group *group = iommu_group_get_for_dev(dev); 342 343 if (IS_ERR(group)) 344 return PTR_ERR(group); 345 346 iommu_group_put(group); 347 348 iommu_device_link(&gart_handle->iommu, dev); 349 350 return 0; 351 } 352 353 static void gart_iommu_remove_device(struct device *dev) 354 { 355 iommu_group_remove_device(dev); 356 iommu_device_unlink(&gart_handle->iommu, dev); 357 } 358 359 static const struct iommu_ops gart_iommu_ops = { 360 .capable = gart_iommu_capable, 361 .domain_alloc = gart_iommu_domain_alloc, 362 .domain_free = gart_iommu_domain_free, 363 .attach_dev = gart_iommu_attach_dev, 364 .detach_dev = gart_iommu_detach_dev, 365 .add_device = gart_iommu_add_device, 366 .remove_device = gart_iommu_remove_device, 367 .device_group = generic_device_group, 368 .map = gart_iommu_map, 369 .map_sg = default_iommu_map_sg, 370 .unmap = gart_iommu_unmap, 371 .iova_to_phys = gart_iommu_iova_to_phys, 372 .pgsize_bitmap = GART_IOMMU_PGSIZES, 373 }; 374 375 static int tegra_gart_suspend(struct device *dev) 376 { 377 struct gart_device *gart = dev_get_drvdata(dev); 378 unsigned long iova; 379 u32 *data = gart->savedata; 380 unsigned long flags; 381 382 spin_lock_irqsave(&gart->pte_lock, flags); 383 for_each_gart_pte(gart, iova) 384 *(data++) = gart_read_pte(gart, iova); 385 spin_unlock_irqrestore(&gart->pte_lock, flags); 386 return 0; 387 } 388 389 static int tegra_gart_resume(struct device *dev) 390 { 391 struct gart_device *gart = dev_get_drvdata(dev); 392 unsigned long flags; 393 394 spin_lock_irqsave(&gart->pte_lock, flags); 395 do_gart_setup(gart, gart->savedata); 396 spin_unlock_irqrestore(&gart->pte_lock, flags); 397 return 0; 398 } 399 400 static int tegra_gart_probe(struct platform_device *pdev) 401 { 402 struct gart_device *gart; 403 struct resource *res, *res_remap; 404 void __iomem *gart_regs; 405 struct device *dev = &pdev->dev; 406 int ret; 407 408 if (gart_handle) 409 return -EIO; 410 411 BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT); 412 413 /* the GART memory aperture is required */ 414 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 415 res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1); 416 if (!res || !res_remap) { 417 dev_err(dev, "GART memory aperture expected\n"); 418 return -ENXIO; 419 } 420 421 gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL); 422 if (!gart) { 423 dev_err(dev, "failed to allocate gart_device\n"); 424 return -ENOMEM; 425 } 426 427 gart_regs = devm_ioremap(dev, res->start, resource_size(res)); 428 if (!gart_regs) { 429 dev_err(dev, "failed to remap GART registers\n"); 430 return -ENXIO; 431 } 432 433 ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL, 434 dev_name(&pdev->dev)); 435 if (ret) { 436 dev_err(dev, "Failed to register IOMMU in sysfs\n"); 437 return ret; 438 } 439 440 iommu_device_set_ops(&gart->iommu, &gart_iommu_ops); 441 442 ret = iommu_device_register(&gart->iommu); 443 if (ret) { 444 dev_err(dev, "Failed to register IOMMU\n"); 445 iommu_device_sysfs_remove(&gart->iommu); 446 return ret; 447 } 448 449 gart->dev = &pdev->dev; 450 spin_lock_init(&gart->pte_lock); 451 spin_lock_init(&gart->client_lock); 452 INIT_LIST_HEAD(&gart->client); 453 gart->regs = gart_regs; 454 gart->iovmm_base = (dma_addr_t)res_remap->start; 455 gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT); 456 457 gart->savedata = vmalloc(sizeof(u32) * gart->page_count); 458 if (!gart->savedata) { 459 dev_err(dev, "failed to allocate context save area\n"); 460 return -ENOMEM; 461 } 462 463 platform_set_drvdata(pdev, gart); 464 do_gart_setup(gart, NULL); 465 466 gart_handle = gart; 467 468 return 0; 469 } 470 471 static int tegra_gart_remove(struct platform_device *pdev) 472 { 473 struct gart_device *gart = platform_get_drvdata(pdev); 474 475 iommu_device_unregister(&gart->iommu); 476 iommu_device_sysfs_remove(&gart->iommu); 477 478 writel(0, gart->regs + GART_CONFIG); 479 if (gart->savedata) 480 vfree(gart->savedata); 481 gart_handle = NULL; 482 return 0; 483 } 484 485 static const struct dev_pm_ops tegra_gart_pm_ops = { 486 .suspend = tegra_gart_suspend, 487 .resume = tegra_gart_resume, 488 }; 489 490 static const struct of_device_id tegra_gart_of_match[] = { 491 { .compatible = "nvidia,tegra20-gart", }, 492 { }, 493 }; 494 MODULE_DEVICE_TABLE(of, tegra_gart_of_match); 495 496 static struct platform_driver tegra_gart_driver = { 497 .probe = tegra_gart_probe, 498 .remove = tegra_gart_remove, 499 .driver = { 500 .name = "tegra-gart", 501 .pm = &tegra_gart_pm_ops, 502 .of_match_table = tegra_gart_of_match, 503 }, 504 }; 505 506 static int tegra_gart_init(void) 507 { 508 return platform_driver_register(&tegra_gart_driver); 509 } 510 511 static void __exit tegra_gart_exit(void) 512 { 513 platform_driver_unregister(&tegra_gart_driver); 514 } 515 516 subsys_initcall(tegra_gart_init); 517 module_exit(tegra_gart_exit); 518 519 MODULE_DESCRIPTION("IOMMU API for GART in Tegra20"); 520 MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); 521 MODULE_ALIAS("platform:tegra-gart"); 522 MODULE_LICENSE("GPL v2"); 523