1 /* 2 * This program is free software; you can redistribute it and/or modify 3 * it under the terms of the GNU General Public License, version 2, as 4 * published by the Free Software Foundation. 5 * 6 * This program is distributed in the hope that it will be useful, 7 * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 * GNU General Public License for more details. 10 * 11 * You should have received a copy of the GNU General Public License 12 * along with this program; if not, write to the Free Software 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 * 15 * Copyright (C) 2013 Freescale Semiconductor, Inc. 16 * Author: Varun Sethi <varun.sethi@freescale.com> 17 * 18 */ 19 20 #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ 21 22 #include <linux/init.h> 23 #include <linux/iommu.h> 24 #include <linux/notifier.h> 25 #include <linux/slab.h> 26 #include <linux/module.h> 27 #include <linux/types.h> 28 #include <linux/mm.h> 29 #include <linux/interrupt.h> 30 #include <linux/device.h> 31 #include <linux/of_platform.h> 32 #include <linux/bootmem.h> 33 #include <linux/err.h> 34 #include <asm/io.h> 35 #include <asm/bitops.h> 36 37 #include <asm/pci-bridge.h> 38 #include <sysdev/fsl_pci.h> 39 40 #include "fsl_pamu_domain.h" 41 42 /* 43 * Global spinlock that needs to be held while 44 * configuring PAMU. 45 */ 46 static DEFINE_SPINLOCK(iommu_lock); 47 48 static struct kmem_cache *fsl_pamu_domain_cache; 49 static struct kmem_cache *iommu_devinfo_cache; 50 static DEFINE_SPINLOCK(device_domain_lock); 51 52 static int __init iommu_init_mempool(void) 53 { 54 55 fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", 56 sizeof(struct fsl_dma_domain), 57 0, 58 SLAB_HWCACHE_ALIGN, 59 60 NULL); 61 if (!fsl_pamu_domain_cache) { 62 pr_debug("Couldn't create fsl iommu_domain cache\n"); 63 return -ENOMEM; 64 } 65 66 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", 67 sizeof(struct device_domain_info), 68 0, 69 SLAB_HWCACHE_ALIGN, 70 NULL); 71 if (!iommu_devinfo_cache) { 72 pr_debug("Couldn't create devinfo cache\n"); 73 kmem_cache_destroy(fsl_pamu_domain_cache); 74 return -ENOMEM; 75 } 76 77 return 0; 78 } 79 80 static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) 81 { 82 u32 win_cnt = dma_domain->win_cnt; 83 struct dma_window *win_ptr = 84 &dma_domain->win_arr[0]; 85 struct iommu_domain_geometry *geom; 86 87 geom = &dma_domain->iommu_domain->geometry; 88 89 if (!win_cnt || !dma_domain->geom_size) { 90 pr_debug("Number of windows/geometry not configured for the domain\n"); 91 return 0; 92 } 93 94 if (win_cnt > 1) { 95 u64 subwin_size; 96 dma_addr_t subwin_iova; 97 u32 wnd; 98 99 subwin_size = dma_domain->geom_size >> ilog2(win_cnt); 100 subwin_iova = iova & ~(subwin_size - 1); 101 wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size); 102 win_ptr = &dma_domain->win_arr[wnd]; 103 } 104 105 if (win_ptr->valid) 106 return (win_ptr->paddr + (iova & (win_ptr->size - 1))); 107 108 return 0; 109 } 110 111 static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) 112 { 113 struct dma_window *sub_win_ptr = 114 &dma_domain->win_arr[0]; 115 int i, ret; 116 unsigned long rpn, flags; 117 118 for (i = 0; i < dma_domain->win_cnt; i++) { 119 if (sub_win_ptr[i].valid) { 120 rpn = sub_win_ptr[i].paddr >> 121 PAMU_PAGE_SHIFT; 122 spin_lock_irqsave(&iommu_lock, flags); 123 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, 124 sub_win_ptr[i].size, 125 ~(u32)0, 126 rpn, 127 dma_domain->snoop_id, 128 dma_domain->stash_id, 129 (i > 0) ? 1 : 0, 130 sub_win_ptr[i].prot); 131 spin_unlock_irqrestore(&iommu_lock, flags); 132 if (ret) { 133 pr_debug("PAMU SPAACE configuration failed for liodn %d\n", 134 liodn); 135 return ret; 136 } 137 } 138 } 139 140 return ret; 141 } 142 143 static int map_win(int liodn, struct fsl_dma_domain *dma_domain) 144 { 145 int ret; 146 struct dma_window *wnd = &dma_domain->win_arr[0]; 147 phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start; 148 unsigned long flags; 149 150 spin_lock_irqsave(&iommu_lock, flags); 151 ret = pamu_config_ppaace(liodn, wnd_addr, 152 wnd->size, 153 ~(u32)0, 154 wnd->paddr >> PAMU_PAGE_SHIFT, 155 dma_domain->snoop_id, dma_domain->stash_id, 156 0, wnd->prot); 157 spin_unlock_irqrestore(&iommu_lock, flags); 158 if (ret) 159 pr_debug("PAMU PAACE configuration failed for liodn %d\n", 160 liodn); 161 162 return ret; 163 } 164 165 /* Map the DMA window corresponding to the LIODN */ 166 static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) 167 { 168 if (dma_domain->win_cnt > 1) 169 return map_subwins(liodn, dma_domain); 170 else 171 return map_win(liodn, dma_domain); 172 173 } 174 175 /* Update window/subwindow mapping for the LIODN */ 176 static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr) 177 { 178 int ret; 179 struct dma_window *wnd = &dma_domain->win_arr[wnd_nr]; 180 unsigned long flags; 181 182 spin_lock_irqsave(&iommu_lock, flags); 183 if (dma_domain->win_cnt > 1) { 184 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr, 185 wnd->size, 186 ~(u32)0, 187 wnd->paddr >> PAMU_PAGE_SHIFT, 188 dma_domain->snoop_id, 189 dma_domain->stash_id, 190 (wnd_nr > 0) ? 1 : 0, 191 wnd->prot); 192 if (ret) 193 pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn); 194 } else { 195 phys_addr_t wnd_addr; 196 197 wnd_addr = dma_domain->iommu_domain->geometry.aperture_start; 198 199 ret = pamu_config_ppaace(liodn, wnd_addr, 200 wnd->size, 201 ~(u32)0, 202 wnd->paddr >> PAMU_PAGE_SHIFT, 203 dma_domain->snoop_id, dma_domain->stash_id, 204 0, wnd->prot); 205 if (ret) 206 pr_debug("Window reconfiguration failed for liodn %d\n", liodn); 207 } 208 209 spin_unlock_irqrestore(&iommu_lock, flags); 210 211 return ret; 212 } 213 214 static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, 215 u32 val) 216 { 217 int ret = 0, i; 218 unsigned long flags; 219 220 spin_lock_irqsave(&iommu_lock, flags); 221 if (!dma_domain->win_arr) { 222 pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn); 223 spin_unlock_irqrestore(&iommu_lock, flags); 224 return -EINVAL; 225 } 226 227 for (i = 0; i < dma_domain->win_cnt; i++) { 228 ret = pamu_update_paace_stash(liodn, i, val); 229 if (ret) { 230 pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn); 231 spin_unlock_irqrestore(&iommu_lock, flags); 232 return ret; 233 } 234 } 235 236 spin_unlock_irqrestore(&iommu_lock, flags); 237 238 return ret; 239 } 240 241 /* Set the geometry parameters for a LIODN */ 242 static int pamu_set_liodn(int liodn, struct device *dev, 243 struct fsl_dma_domain *dma_domain, 244 struct iommu_domain_geometry *geom_attr, 245 u32 win_cnt) 246 { 247 phys_addr_t window_addr, window_size; 248 phys_addr_t subwin_size; 249 int ret = 0, i; 250 u32 omi_index = ~(u32)0; 251 unsigned long flags; 252 253 /* 254 * Configure the omi_index at the geometry setup time. 255 * This is a static value which depends on the type of 256 * device and would not change thereafter. 257 */ 258 get_ome_index(&omi_index, dev); 259 260 window_addr = geom_attr->aperture_start; 261 window_size = dma_domain->geom_size; 262 263 spin_lock_irqsave(&iommu_lock, flags); 264 ret = pamu_disable_liodn(liodn); 265 if (!ret) 266 ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index, 267 0, dma_domain->snoop_id, 268 dma_domain->stash_id, win_cnt, 0); 269 spin_unlock_irqrestore(&iommu_lock, flags); 270 if (ret) { 271 pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt); 272 return ret; 273 } 274 275 if (win_cnt > 1) { 276 subwin_size = window_size >> ilog2(win_cnt); 277 for (i = 0; i < win_cnt; i++) { 278 spin_lock_irqsave(&iommu_lock, flags); 279 ret = pamu_disable_spaace(liodn, i); 280 if (!ret) 281 ret = pamu_config_spaace(liodn, win_cnt, i, 282 subwin_size, omi_index, 283 0, dma_domain->snoop_id, 284 dma_domain->stash_id, 285 0, 0); 286 spin_unlock_irqrestore(&iommu_lock, flags); 287 if (ret) { 288 pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn); 289 return ret; 290 } 291 } 292 } 293 294 return ret; 295 } 296 297 static int check_size(u64 size, dma_addr_t iova) 298 { 299 /* 300 * Size must be a power of two and at least be equal 301 * to PAMU page size. 302 */ 303 if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { 304 pr_debug("%s: size too small or not a power of two\n", __func__); 305 return -EINVAL; 306 } 307 308 /* iova must be page size aligned*/ 309 if (iova & (size - 1)) { 310 pr_debug("%s: address is not aligned with window size\n", __func__); 311 return -EINVAL; 312 } 313 314 return 0; 315 } 316 317 static struct fsl_dma_domain *iommu_alloc_dma_domain(void) 318 { 319 struct fsl_dma_domain *domain; 320 321 domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); 322 if (!domain) 323 return NULL; 324 325 domain->stash_id = ~(u32)0; 326 domain->snoop_id = ~(u32)0; 327 domain->win_cnt = pamu_get_max_subwin_cnt(); 328 domain->geom_size = 0; 329 330 INIT_LIST_HEAD(&domain->devices); 331 332 spin_lock_init(&domain->domain_lock); 333 334 return domain; 335 } 336 337 static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) 338 { 339 unsigned long flags; 340 341 list_del(&info->link); 342 spin_lock_irqsave(&iommu_lock, flags); 343 if (win_cnt > 1) 344 pamu_free_subwins(info->liodn); 345 pamu_disable_liodn(info->liodn); 346 spin_unlock_irqrestore(&iommu_lock, flags); 347 spin_lock_irqsave(&device_domain_lock, flags); 348 info->dev->archdata.iommu_domain = NULL; 349 kmem_cache_free(iommu_devinfo_cache, info); 350 spin_unlock_irqrestore(&device_domain_lock, flags); 351 } 352 353 static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain) 354 { 355 struct device_domain_info *info, *tmp; 356 unsigned long flags; 357 358 spin_lock_irqsave(&dma_domain->domain_lock, flags); 359 /* Remove the device from the domain device list */ 360 list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) { 361 if (!dev || (info->dev == dev)) 362 remove_device_ref(info, dma_domain->win_cnt); 363 } 364 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 365 } 366 367 static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev) 368 { 369 struct device_domain_info *info, *old_domain_info; 370 unsigned long flags; 371 372 spin_lock_irqsave(&device_domain_lock, flags); 373 /* 374 * Check here if the device is already attached to domain or not. 375 * If the device is already attached to a domain detach it. 376 */ 377 old_domain_info = dev->archdata.iommu_domain; 378 if (old_domain_info && old_domain_info->domain != dma_domain) { 379 spin_unlock_irqrestore(&device_domain_lock, flags); 380 detach_device(dev, old_domain_info->domain); 381 spin_lock_irqsave(&device_domain_lock, flags); 382 } 383 384 info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC); 385 386 info->dev = dev; 387 info->liodn = liodn; 388 info->domain = dma_domain; 389 390 list_add(&info->link, &dma_domain->devices); 391 /* 392 * In case of devices with multiple LIODNs just store 393 * the info for the first LIODN as all 394 * LIODNs share the same domain 395 */ 396 if (!dev->archdata.iommu_domain) 397 dev->archdata.iommu_domain = info; 398 spin_unlock_irqrestore(&device_domain_lock, flags); 399 400 } 401 402 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, 403 dma_addr_t iova) 404 { 405 struct fsl_dma_domain *dma_domain = domain->priv; 406 407 if ((iova < domain->geometry.aperture_start) || 408 iova > (domain->geometry.aperture_end)) 409 return 0; 410 411 return get_phys_addr(dma_domain, iova); 412 } 413 414 static int fsl_pamu_domain_has_cap(struct iommu_domain *domain, 415 unsigned long cap) 416 { 417 return cap == IOMMU_CAP_CACHE_COHERENCY; 418 } 419 420 static void fsl_pamu_domain_destroy(struct iommu_domain *domain) 421 { 422 struct fsl_dma_domain *dma_domain = domain->priv; 423 424 domain->priv = NULL; 425 426 /* remove all the devices from the device list */ 427 detach_device(NULL, dma_domain); 428 429 dma_domain->enabled = 0; 430 dma_domain->mapped = 0; 431 432 kmem_cache_free(fsl_pamu_domain_cache, dma_domain); 433 } 434 435 static int fsl_pamu_domain_init(struct iommu_domain *domain) 436 { 437 struct fsl_dma_domain *dma_domain; 438 439 dma_domain = iommu_alloc_dma_domain(); 440 if (!dma_domain) { 441 pr_debug("dma_domain allocation failed\n"); 442 return -ENOMEM; 443 } 444 domain->priv = dma_domain; 445 dma_domain->iommu_domain = domain; 446 /* defaul geometry 64 GB i.e. maximum system address */ 447 domain->geometry.aperture_start = 0; 448 domain->geometry.aperture_end = (1ULL << 36) - 1; 449 domain->geometry.force_aperture = true; 450 451 return 0; 452 } 453 454 /* Configure geometry settings for all LIODNs associated with domain */ 455 static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain, 456 struct iommu_domain_geometry *geom_attr, 457 u32 win_cnt) 458 { 459 struct device_domain_info *info; 460 int ret = 0; 461 462 list_for_each_entry(info, &dma_domain->devices, link) { 463 ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, 464 geom_attr, win_cnt); 465 if (ret) 466 break; 467 } 468 469 return ret; 470 } 471 472 /* Update stash destination for all LIODNs associated with the domain */ 473 static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) 474 { 475 struct device_domain_info *info; 476 int ret = 0; 477 478 list_for_each_entry(info, &dma_domain->devices, link) { 479 ret = update_liodn_stash(info->liodn, dma_domain, val); 480 if (ret) 481 break; 482 } 483 484 return ret; 485 } 486 487 /* Update domain mappings for all LIODNs associated with the domain */ 488 static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr) 489 { 490 struct device_domain_info *info; 491 int ret = 0; 492 493 list_for_each_entry(info, &dma_domain->devices, link) { 494 ret = update_liodn(info->liodn, dma_domain, wnd_nr); 495 if (ret) 496 break; 497 } 498 return ret; 499 } 500 501 static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr) 502 { 503 struct device_domain_info *info; 504 int ret = 0; 505 506 list_for_each_entry(info, &dma_domain->devices, link) { 507 if (dma_domain->win_cnt == 1 && dma_domain->enabled) { 508 ret = pamu_disable_liodn(info->liodn); 509 if (!ret) 510 dma_domain->enabled = 0; 511 } else { 512 ret = pamu_disable_spaace(info->liodn, wnd_nr); 513 } 514 } 515 516 return ret; 517 } 518 519 static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) 520 { 521 struct fsl_dma_domain *dma_domain = domain->priv; 522 unsigned long flags; 523 int ret; 524 525 spin_lock_irqsave(&dma_domain->domain_lock, flags); 526 if (!dma_domain->win_arr) { 527 pr_debug("Number of windows not configured\n"); 528 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 529 return; 530 } 531 532 if (wnd_nr >= dma_domain->win_cnt) { 533 pr_debug("Invalid window index\n"); 534 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 535 return; 536 } 537 538 if (dma_domain->win_arr[wnd_nr].valid) { 539 ret = disable_domain_win(dma_domain, wnd_nr); 540 if (!ret) { 541 dma_domain->win_arr[wnd_nr].valid = 0; 542 dma_domain->mapped--; 543 } 544 } 545 546 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 547 548 } 549 550 static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, 551 phys_addr_t paddr, u64 size, int prot) 552 { 553 struct fsl_dma_domain *dma_domain = domain->priv; 554 struct dma_window *wnd; 555 int pamu_prot = 0; 556 int ret; 557 unsigned long flags; 558 u64 win_size; 559 560 if (prot & IOMMU_READ) 561 pamu_prot |= PAACE_AP_PERMS_QUERY; 562 if (prot & IOMMU_WRITE) 563 pamu_prot |= PAACE_AP_PERMS_UPDATE; 564 565 spin_lock_irqsave(&dma_domain->domain_lock, flags); 566 if (!dma_domain->win_arr) { 567 pr_debug("Number of windows not configured\n"); 568 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 569 return -ENODEV; 570 } 571 572 if (wnd_nr >= dma_domain->win_cnt) { 573 pr_debug("Invalid window index\n"); 574 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 575 return -EINVAL; 576 } 577 578 win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); 579 if (size > win_size) { 580 pr_debug("Invalid window size \n"); 581 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 582 return -EINVAL; 583 } 584 585 if (dma_domain->win_cnt == 1) { 586 if (dma_domain->enabled) { 587 pr_debug("Disable the window before updating the mapping\n"); 588 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 589 return -EBUSY; 590 } 591 592 ret = check_size(size, domain->geometry.aperture_start); 593 if (ret) { 594 pr_debug("Aperture start not aligned to the size\n"); 595 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 596 return -EINVAL; 597 } 598 } 599 600 wnd = &dma_domain->win_arr[wnd_nr]; 601 if (!wnd->valid) { 602 wnd->paddr = paddr; 603 wnd->size = size; 604 wnd->prot = pamu_prot; 605 606 ret = update_domain_mapping(dma_domain, wnd_nr); 607 if (!ret) { 608 wnd->valid = 1; 609 dma_domain->mapped++; 610 } 611 } else { 612 pr_debug("Disable the window before updating the mapping\n"); 613 ret = -EBUSY; 614 } 615 616 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 617 618 return ret; 619 } 620 621 /* 622 * Attach the LIODN to the DMA domain and configure the geometry 623 * and window mappings. 624 */ 625 static int handle_attach_device(struct fsl_dma_domain *dma_domain, 626 struct device *dev, const u32 *liodn, 627 int num) 628 { 629 unsigned long flags; 630 struct iommu_domain *domain = dma_domain->iommu_domain; 631 int ret = 0; 632 int i; 633 634 spin_lock_irqsave(&dma_domain->domain_lock, flags); 635 for (i = 0; i < num; i++) { 636 637 /* Ensure that LIODN value is valid */ 638 if (liodn[i] >= PAACE_NUMBER_ENTRIES) { 639 pr_debug("Invalid liodn %d, attach device failed for %s\n", 640 liodn[i], dev->of_node->full_name); 641 ret = -EINVAL; 642 break; 643 } 644 645 attach_device(dma_domain, liodn[i], dev); 646 /* 647 * Check if geometry has already been configured 648 * for the domain. If yes, set the geometry for 649 * the LIODN. 650 */ 651 if (dma_domain->win_arr) { 652 u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; 653 ret = pamu_set_liodn(liodn[i], dev, dma_domain, 654 &domain->geometry, 655 win_cnt); 656 if (ret) 657 break; 658 if (dma_domain->mapped) { 659 /* 660 * Create window/subwindow mapping for 661 * the LIODN. 662 */ 663 ret = map_liodn(liodn[i], dma_domain); 664 if (ret) 665 break; 666 } 667 } 668 } 669 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 670 671 return ret; 672 } 673 674 static int fsl_pamu_attach_device(struct iommu_domain *domain, 675 struct device *dev) 676 { 677 struct fsl_dma_domain *dma_domain = domain->priv; 678 const u32 *liodn; 679 u32 liodn_cnt; 680 int len, ret = 0; 681 struct pci_dev *pdev = NULL; 682 struct pci_controller *pci_ctl; 683 684 /* 685 * Use LIODN of the PCI controller while attaching a 686 * PCI device. 687 */ 688 if (dev_is_pci(dev)) { 689 pdev = to_pci_dev(dev); 690 pci_ctl = pci_bus_to_host(pdev->bus); 691 /* 692 * make dev point to pci controller device 693 * so we can get the LIODN programmed by 694 * u-boot. 695 */ 696 dev = pci_ctl->parent; 697 } 698 699 liodn = of_get_property(dev->of_node, "fsl,liodn", &len); 700 if (liodn) { 701 liodn_cnt = len / sizeof(u32); 702 ret = handle_attach_device(dma_domain, dev, 703 liodn, liodn_cnt); 704 } else { 705 pr_debug("missing fsl,liodn property at %s\n", 706 dev->of_node->full_name); 707 ret = -EINVAL; 708 } 709 710 return ret; 711 } 712 713 static void fsl_pamu_detach_device(struct iommu_domain *domain, 714 struct device *dev) 715 { 716 struct fsl_dma_domain *dma_domain = domain->priv; 717 const u32 *prop; 718 int len; 719 struct pci_dev *pdev = NULL; 720 struct pci_controller *pci_ctl; 721 722 /* 723 * Use LIODN of the PCI controller while detaching a 724 * PCI device. 725 */ 726 if (dev_is_pci(dev)) { 727 pdev = to_pci_dev(dev); 728 pci_ctl = pci_bus_to_host(pdev->bus); 729 /* 730 * make dev point to pci controller device 731 * so we can get the LIODN programmed by 732 * u-boot. 733 */ 734 dev = pci_ctl->parent; 735 } 736 737 prop = of_get_property(dev->of_node, "fsl,liodn", &len); 738 if (prop) 739 detach_device(dev, dma_domain); 740 else 741 pr_debug("missing fsl,liodn property at %s\n", 742 dev->of_node->full_name); 743 } 744 745 static int configure_domain_geometry(struct iommu_domain *domain, void *data) 746 { 747 struct iommu_domain_geometry *geom_attr = data; 748 struct fsl_dma_domain *dma_domain = domain->priv; 749 dma_addr_t geom_size; 750 unsigned long flags; 751 752 geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1; 753 /* 754 * Sanity check the geometry size. Also, we do not support 755 * DMA outside of the geometry. 756 */ 757 if (check_size(geom_size, geom_attr->aperture_start) || 758 !geom_attr->force_aperture) { 759 pr_debug("Invalid PAMU geometry attributes\n"); 760 return -EINVAL; 761 } 762 763 spin_lock_irqsave(&dma_domain->domain_lock, flags); 764 if (dma_domain->enabled) { 765 pr_debug("Can't set geometry attributes as domain is active\n"); 766 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 767 return -EBUSY; 768 } 769 770 /* Copy the domain geometry information */ 771 memcpy(&domain->geometry, geom_attr, 772 sizeof(struct iommu_domain_geometry)); 773 dma_domain->geom_size = geom_size; 774 775 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 776 777 return 0; 778 } 779 780 /* Set the domain stash attribute */ 781 static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) 782 { 783 struct pamu_stash_attribute *stash_attr = data; 784 unsigned long flags; 785 int ret; 786 787 spin_lock_irqsave(&dma_domain->domain_lock, flags); 788 789 memcpy(&dma_domain->dma_stash, stash_attr, 790 sizeof(struct pamu_stash_attribute)); 791 792 dma_domain->stash_id = get_stash_id(stash_attr->cache, 793 stash_attr->cpu); 794 if (dma_domain->stash_id == ~(u32)0) { 795 pr_debug("Invalid stash attributes\n"); 796 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 797 return -EINVAL; 798 } 799 800 ret = update_domain_stash(dma_domain, dma_domain->stash_id); 801 802 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 803 804 return ret; 805 } 806 807 /* Configure domain dma state i.e. enable/disable DMA*/ 808 static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) 809 { 810 struct device_domain_info *info; 811 unsigned long flags; 812 int ret; 813 814 spin_lock_irqsave(&dma_domain->domain_lock, flags); 815 816 if (enable && !dma_domain->mapped) { 817 pr_debug("Can't enable DMA domain without valid mapping\n"); 818 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 819 return -ENODEV; 820 } 821 822 dma_domain->enabled = enable; 823 list_for_each_entry(info, &dma_domain->devices, 824 link) { 825 ret = (enable) ? pamu_enable_liodn(info->liodn) : 826 pamu_disable_liodn(info->liodn); 827 if (ret) 828 pr_debug("Unable to set dma state for liodn %d", 829 info->liodn); 830 } 831 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 832 833 return 0; 834 } 835 836 static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, 837 enum iommu_attr attr_type, void *data) 838 { 839 struct fsl_dma_domain *dma_domain = domain->priv; 840 int ret = 0; 841 842 843 switch (attr_type) { 844 case DOMAIN_ATTR_GEOMETRY: 845 ret = configure_domain_geometry(domain, data); 846 break; 847 case DOMAIN_ATTR_FSL_PAMU_STASH: 848 ret = configure_domain_stash(dma_domain, data); 849 break; 850 case DOMAIN_ATTR_FSL_PAMU_ENABLE: 851 ret = configure_domain_dma_state(dma_domain, *(int *)data); 852 break; 853 default: 854 pr_debug("Unsupported attribute type\n"); 855 ret = -EINVAL; 856 break; 857 }; 858 859 return ret; 860 } 861 862 static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, 863 enum iommu_attr attr_type, void *data) 864 { 865 struct fsl_dma_domain *dma_domain = domain->priv; 866 int ret = 0; 867 868 869 switch (attr_type) { 870 case DOMAIN_ATTR_FSL_PAMU_STASH: 871 memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash, 872 sizeof(struct pamu_stash_attribute)); 873 break; 874 case DOMAIN_ATTR_FSL_PAMU_ENABLE: 875 *(int *)data = dma_domain->enabled; 876 break; 877 case DOMAIN_ATTR_FSL_PAMUV1: 878 *(int *)data = DOMAIN_ATTR_FSL_PAMUV1; 879 break; 880 default: 881 pr_debug("Unsupported attribute type\n"); 882 ret = -EINVAL; 883 break; 884 }; 885 886 return ret; 887 } 888 889 static struct iommu_group *get_device_iommu_group(struct device *dev) 890 { 891 struct iommu_group *group; 892 893 group = iommu_group_get(dev); 894 if (!group) 895 group = iommu_group_alloc(); 896 897 return group; 898 } 899 900 static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl) 901 { 902 u32 version; 903 904 /* Check the PCI controller version number by readding BRR1 register */ 905 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); 906 version &= PCI_FSL_BRR1_VER; 907 /* If PCI controller version is >= 0x204 we can partition endpoints*/ 908 if (version >= 0x204) 909 return 1; 910 911 return 0; 912 } 913 914 /* Get iommu group information from peer devices or devices on the parent bus */ 915 static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev) 916 { 917 struct pci_dev *tmp; 918 struct iommu_group *group; 919 struct pci_bus *bus = pdev->bus; 920 921 /* 922 * Traverese the pci bus device list to get 923 * the shared iommu group. 924 */ 925 while (bus) { 926 list_for_each_entry(tmp, &bus->devices, bus_list) { 927 if (tmp == pdev) 928 continue; 929 group = iommu_group_get(&tmp->dev); 930 if (group) 931 return group; 932 } 933 934 bus = bus->parent; 935 } 936 937 return NULL; 938 } 939 940 static struct iommu_group *get_pci_device_group(struct pci_dev *pdev) 941 { 942 struct pci_controller *pci_ctl; 943 bool pci_endpt_partioning; 944 struct iommu_group *group = NULL; 945 946 pci_ctl = pci_bus_to_host(pdev->bus); 947 pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl); 948 /* We can partition PCIe devices so assign device group to the device */ 949 if (pci_endpt_partioning) { 950 group = iommu_group_get_for_dev(&pdev->dev); 951 952 /* 953 * PCIe controller is not a paritionable entity 954 * free the controller device iommu_group. 955 */ 956 if (pci_ctl->parent->iommu_group) 957 iommu_group_remove_device(pci_ctl->parent); 958 } else { 959 /* 960 * All devices connected to the controller will share the 961 * PCI controllers device group. If this is the first 962 * device to be probed for the pci controller, copy the 963 * device group information from the PCI controller device 964 * node and remove the PCI controller iommu group. 965 * For subsequent devices, the iommu group information can 966 * be obtained from sibling devices (i.e. from the bus_devices 967 * link list). 968 */ 969 if (pci_ctl->parent->iommu_group) { 970 group = get_device_iommu_group(pci_ctl->parent); 971 iommu_group_remove_device(pci_ctl->parent); 972 } else 973 group = get_shared_pci_device_group(pdev); 974 } 975 976 if (!group) 977 group = ERR_PTR(-ENODEV); 978 979 return group; 980 } 981 982 static int fsl_pamu_add_device(struct device *dev) 983 { 984 struct iommu_group *group = ERR_PTR(-ENODEV); 985 struct pci_dev *pdev; 986 const u32 *prop; 987 int ret, len; 988 989 /* 990 * For platform devices we allocate a separate group for 991 * each of the devices. 992 */ 993 if (dev_is_pci(dev)) { 994 pdev = to_pci_dev(dev); 995 /* Don't create device groups for virtual PCI bridges */ 996 if (pdev->subordinate) 997 return 0; 998 999 group = get_pci_device_group(pdev); 1000 1001 } else { 1002 prop = of_get_property(dev->of_node, "fsl,liodn", &len); 1003 if (prop) 1004 group = get_device_iommu_group(dev); 1005 } 1006 1007 if (IS_ERR(group)) 1008 return PTR_ERR(group); 1009 1010 ret = iommu_group_add_device(group, dev); 1011 1012 iommu_group_put(group); 1013 return ret; 1014 } 1015 1016 static void fsl_pamu_remove_device(struct device *dev) 1017 { 1018 iommu_group_remove_device(dev); 1019 } 1020 1021 static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) 1022 { 1023 struct fsl_dma_domain *dma_domain = domain->priv; 1024 unsigned long flags; 1025 int ret; 1026 1027 spin_lock_irqsave(&dma_domain->domain_lock, flags); 1028 /* Ensure domain is inactive i.e. DMA should be disabled for the domain */ 1029 if (dma_domain->enabled) { 1030 pr_debug("Can't set geometry attributes as domain is active\n"); 1031 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 1032 return -EBUSY; 1033 } 1034 1035 /* Ensure that the geometry has been set for the domain */ 1036 if (!dma_domain->geom_size) { 1037 pr_debug("Please configure geometry before setting the number of windows\n"); 1038 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 1039 return -EINVAL; 1040 } 1041 1042 /* 1043 * Ensure we have valid window count i.e. it should be less than 1044 * maximum permissible limit and should be a power of two. 1045 */ 1046 if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) { 1047 pr_debug("Invalid window count\n"); 1048 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 1049 return -EINVAL; 1050 } 1051 1052 ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, 1053 ((w_count > 1) ? w_count : 0)); 1054 if (!ret) { 1055 kfree(dma_domain->win_arr); 1056 dma_domain->win_arr = kzalloc(sizeof(struct dma_window) * 1057 w_count, GFP_ATOMIC); 1058 if (!dma_domain->win_arr) { 1059 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 1060 return -ENOMEM; 1061 } 1062 dma_domain->win_cnt = w_count; 1063 } 1064 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 1065 1066 return ret; 1067 } 1068 1069 static u32 fsl_pamu_get_windows(struct iommu_domain *domain) 1070 { 1071 struct fsl_dma_domain *dma_domain = domain->priv; 1072 1073 return dma_domain->win_cnt; 1074 } 1075 1076 static const struct iommu_ops fsl_pamu_ops = { 1077 .domain_init = fsl_pamu_domain_init, 1078 .domain_destroy = fsl_pamu_domain_destroy, 1079 .attach_dev = fsl_pamu_attach_device, 1080 .detach_dev = fsl_pamu_detach_device, 1081 .domain_window_enable = fsl_pamu_window_enable, 1082 .domain_window_disable = fsl_pamu_window_disable, 1083 .domain_get_windows = fsl_pamu_get_windows, 1084 .domain_set_windows = fsl_pamu_set_windows, 1085 .iova_to_phys = fsl_pamu_iova_to_phys, 1086 .domain_has_cap = fsl_pamu_domain_has_cap, 1087 .domain_set_attr = fsl_pamu_set_domain_attr, 1088 .domain_get_attr = fsl_pamu_get_domain_attr, 1089 .add_device = fsl_pamu_add_device, 1090 .remove_device = fsl_pamu_remove_device, 1091 }; 1092 1093 int pamu_domain_init(void) 1094 { 1095 int ret = 0; 1096 1097 ret = iommu_init_mempool(); 1098 if (ret) 1099 return ret; 1100 1101 bus_set_iommu(&platform_bus_type, &fsl_pamu_ops); 1102 bus_set_iommu(&pci_bus_type, &fsl_pamu_ops); 1103 1104 return ret; 1105 } 1106