/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain.geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		subwin_iova = iova & ~(subwin_size - 1);
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}

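/*
 * Worked example for the subwindow lookup above (values are
 * illustrative, not taken from any particular board): with a 1 GiB
 * geometry at aperture_start 0 and win_cnt = 4, each subwindow spans
 * 256 MiB (0x10000000 bytes). An iova of 0x13000000 rounds down to
 * subwindow base 0x10000000, so wnd = 0x10000000 >> 28 = 1; if that
 * window is valid and its size equals the subwindow size, the
 * translation is win_arr[1].paddr + 0x03000000.
 */
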
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
	struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
	int i, ret = 0;	/* initialized in case no subwindow is valid */
	unsigned long rpn, flags;

	for (i = 0; i < dma_domain->win_cnt; i++) {
		if (sub_win_ptr[i].valid) {
			rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
						 sub_win_ptr[i].size,
						 ~(u32)0,
						 rpn,
						 dma_domain->snoop_id,
						 dma_domain->stash_id,
						 (i > 0) ? 1 : 0,
						 sub_win_ptr[i].prot);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);

	return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
	if (dma_domain->win_cnt > 1)
		return map_subwins(liodn, dma_domain);
	else
		return map_win(liodn, dma_domain);
}

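/*
 * Background on the two paths above: a PAMU translation entry is a
 * PAACE. Each LIODN has one primary entry (PPAACE) describing its
 * whole DMA window, and a multi-window domain additionally carves the
 * geometry into equally sized subwindows, each backed by a secondary
 * entry (SPAACE). That is why map_liodn() programs a single PPAACE
 * for a one-window domain but one SPAACE per valid subwindow
 * otherwise.
 */
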
/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
				 liodn);
	} else {
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n",
				 liodn);
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
			 liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d field for liodn %d\n",
				 i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d, win_cnt = %d\n",
			 liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int check_size(u64 size, dma_addr_t iova)
{
	/*
	 * Size must be a power of two and at least equal to the
	 * PAMU page size.
	 */
	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
		pr_debug("Size too small or not a power of two\n");
		return -EINVAL;
	}

	/* The iova must be aligned to the window size */
	if (iova & (size - 1)) {
		pr_debug("Address is not aligned with window size\n");
		return -EINVAL;
	}

	return 0;
}

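/*
 * For example (values are illustrative): check_size(0x10000, 0x8000)
 * fails because 0x8000 & (0x10000 - 1) != 0, i.e. a 64 KiB window may
 * not start at a 32 KiB boundary, while check_size(0x10000, 0x20000)
 * succeeds. PAMU_PAGE_SIZE is 4 KiB, so anything smaller is rejected
 * outright.
 */
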
static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
	struct fsl_dma_domain *domain;

	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->stash_id = ~(u32)0;
	domain->snoop_id = ~(u32)0;
	domain->win_cnt = pamu_get_max_subwin_cnt();
	domain->geom_size = 0;

	INIT_LIST_HEAD(&domain->devices);

	spin_lock_init(&domain->domain_lock);

	return domain;
}

static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check if the device is already attached to a domain;
	 * if it is attached to a different domain, detach it first.
	 */
	old_domain_info = dev->archdata.iommu_domain;
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		/* Allocation failure leaves the device unattached */
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In the case of devices with multiple LIODNs, just store
	 * the info for the first LIODN, as all
	 * LIODNs share the same domain.
	 */
	if (!dev->archdata.iommu_domain)
		dev->archdata.iommu_domain = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	return get_phys_addr(dma_domain, iova);
}

static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return NULL;
	}
	/* default geometry: 64 GB, i.e. the maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

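/*
 * A minimal sketch of how a caller (e.g. VFIO) is expected to drive
 * this driver; the values are illustrative, and dev/paddr stand for
 * whatever the caller already holds:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	struct iommu_domain_geometry geom = {
 *		.aperture_start = 0,
 *		.aperture_end   = SZ_1G - 1,
 *		.force_aperture = true,
 *	};
 *	u32 wins = 4;
 *	int enable = 1;
 *
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_GEOMETRY, &geom);
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &wins);
 *	iommu_attach_device(dom, dev);
 *	iommu_domain_window_enable(dom, 0, paddr, SZ_256M,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
 */
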
/* Configure geometry settings for all LIODNs associated with domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
				    struct iommu_domain_geometry *geom_attr,
				    u32 win_cnt)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
				     geom_attr, win_cnt);
		if (ret)
			break;
	}

	return ret;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
		if (ret)
			break;
	}
	return ret;
}

static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}

static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (dma_domain->win_arr[wnd_nr].valid) {
		ret = disable_domain_win(dma_domain, wnd_nr);
		if (!ret) {
			dma_domain->win_arr[wnd_nr].valid = 0;
			dma_domain->mapped--;
		}
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
	unsigned long flags;
	u64 win_size;

	if (prot & IOMMU_READ)
		pamu_prot |= PAACE_AP_PERMS_QUERY;
	if (prot & IOMMU_WRITE)
		pamu_prot |= PAACE_AP_PERMS_UPDATE;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
	if (size > win_size) {
		pr_debug("Invalid window size\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	if (dma_domain->win_cnt == 1) {
		if (dma_domain->enabled) {
			pr_debug("Disable the window before updating the mapping\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EBUSY;
		}

		ret = check_size(size, domain->geometry.aperture_start);
		if (ret) {
			pr_debug("Aperture start not aligned to the size\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EINVAL;
		}
	}

	wnd = &dma_domain->win_arr[wnd_nr];
	if (!wnd->valid) {
		wnd->paddr = paddr;
		wnd->size = size;
		wnd->prot = pamu_prot;

		ret = update_domain_mapping(dma_domain, wnd_nr);
		if (!ret) {
			wnd->valid = 1;
			dma_domain->mapped++;
		}
	} else {
		pr_debug("Disable the window before updating the mapping\n");
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

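/*
 * Illustrative call (names hypothetical): with a 1 GiB geometry split
 * into four windows, mapping the second 256 MiB of DMA space onto a
 * contiguous physical buffer would look like
 *
 *	iommu_domain_window_enable(dom, 1, buf_phys, SZ_256M,
 *				   IOMMU_READ | IOMMU_WRITE);
 *
 * which records the window in win_arr[1] and reprograms the matching
 * SPAACE on every attached LIODN via update_domain_mapping().
 */
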
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = &dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that the LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry, win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		ret = -EINVAL;
	}

	return ret;
}

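/*
 * "fsl,liodn" is the device-tree property U-Boot fills in on these
 * SoCs; a node carrying two LIODNs might look like this (node name
 * and values purely illustrative):
 *
 *	ethernet@b0000 {
 *		fsl,liodn = <0x18 0x19>;
 *	};
 *
 * in which case handle_attach_device() above is called with num = 2
 * and attaches both LIODNs to the same domain.
 */
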
static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}

static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
	struct iommu_domain_geometry *geom_attr = data;
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	dma_addr_t geom_size;
	unsigned long flags;

	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
	/*
	 * Sanity check the geometry size. Also, we do not support
	 * DMA outside of the geometry.
	 */
	if (check_size(geom_size, geom_attr->aperture_start) ||
	    !geom_attr->force_aperture) {
		pr_debug("Invalid PAMU geometry attributes\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Copy the domain geometry information */
	memcpy(&domain->geometry, geom_attr,
	       sizeof(struct iommu_domain_geometry));
	dma_domain->geom_size = geom_size;

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

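/*
 * Stashing directs PAMU to push inbound DMA data into a CPU cache. A
 * sketch of how a caller would select the L1 cache of CPU 0, assuming
 * the field and constant names from fsl_pamu_stash.h:
 *
 *	struct pamu_stash_attribute stash = {
 *		.cpu   = 0,
 *		.cache = PAMU_ATTR_CACHE_L1,
 *	};
 *
 *	iommu_domain_set_attr(dom, DOMAIN_ATTR_FSL_PAMU_STASH, &stash);
 */
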
/* Configure the domain DMA state, i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d\n",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure the domain is inactive, i.e. DMA is disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set the number of windows as the domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have a valid window count, i.e. it is less than the
	 * maximum permissible limit and is a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       w_count > 1 ? w_count : 0);
	if (!ret) {
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(*dma_domain->win_arr),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

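/*
 * For example (illustrative numbers): with a 1 GiB geometry, passing
 * w_count = 4 programs each attached LIODN for four 256 MiB
 * subwindows, while w_count = 1 keeps a single window covering the
 * whole geometry (the "w_count > 1 ? w_count : 0" above tells the
 * PAMU layer not to use subwindows at all in that case).
 */
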
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_GEOMETRY:
		ret = configure_domain_geometry(domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		ret = configure_domain_stash(dma_domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		ret = configure_domain_dma_state(dma_domain, *(int *)data);
		break;
	case DOMAIN_ATTR_WINDOWS:
		ret = fsl_pamu_set_windows(domain, *(u32 *)data);
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		memcpy(data, &dma_domain->dma_stash,
		       sizeof(struct pamu_stash_attribute));
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		*(int *)data = dma_domain->enabled;
		break;
	case DOMAIN_ATTR_FSL_PAMUV1:
		*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
		break;
	case DOMAIN_ATTR_WINDOWS:
		*(u32 *)data = dma_domain->win_cnt;
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If the PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices, so assign a device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity,
		 * so free the controller device's iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * linked list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

static int fsl_pamu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&pamu_iommu, dev);

	return 0;
}

static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_device_unlink(&pamu_iommu, dev);
	iommu_group_remove_device(dev);
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable = fsl_pamu_capable,
	.domain_alloc = fsl_pamu_domain_alloc,
	.domain_free = fsl_pamu_domain_free,
	.attach_dev = fsl_pamu_attach_device,
	.detach_dev = fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.iova_to_phys = fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.add_device = fsl_pamu_add_device,
	.remove_device = fsl_pamu_remove_device,
	.device_group = fsl_pamu_device_group,
};

int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);

	ret = iommu_device_register(&pamu_iommu);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}