/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
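
/*
 * Concrete example of the scenario described above: with 4-way SMT the
 * primary threads are CPUs 0, 4, 8, ... so indexing 4 pools directly with
 * "cpu & (nr_pools - 1)" would put every primary thread in pool 0, while
 * hashing the CPU number first spreads them across all the pools.
 */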

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);
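
	/*
	 * Allocations of more than 15 IOMMU pages ("largealloc" above) are
	 * served from the single large pool reserved at the top of the
	 * table; smaller allocations are spread across the hashed per-CPU
	 * pools so that concurrent mappings contend on different pool locks.
	 */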

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			     ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}


void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump the system goes through a clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This prevents buggy drivers that treat page 0 as invalid
	 * from crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;
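
	/*
	 * Worked example of the layout above (assuming IOMMU_NR_POOLS is 4):
	 * a 2GB DMA window of 4K IOMMU pages has it_size = 524288 entries,
	 * so each of the 4 small pools covers 524288 * 3/4 / 4 = 98304
	 * entries and the large pool covers the remaining 131072 entries
	 * at the top of the table.
	 */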

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

static void iommu_table_free(struct kref *kref)
{
	unsigned long bitmap_sz;
	unsigned int order;
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
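
		/*
		 * Illustrative arithmetic (assuming 64K kernel pages and 4K
		 * IOMMU pages): PAGE_SHIFT - it_page_shift = 16 - 12 = 4, so
		 * the allocation is aligned to 2^4 = 16 IOMMU pages and the
		 * mapping of a page-aligned buffer starts on a kernel-page
		 * boundary in DMA space as well.
		 */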
		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
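
/*
 * Note on the mapping below: TCE_PCI_READ grants the device permission to
 * read system memory, which is what a DMA_TO_DEVICE transfer needs, while
 * TCE_PCI_WRITE lets the device write system memory (DMA_FROM_DEVICE);
 * DMA_BIDIRECTIONAL requires both permissions.
 */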
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
			PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			 pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);

long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
				__func__, hwaddr, entry << tbl->it_page_shift,
				hwaddr, ret); */

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);

int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires the exchange() callback to be defined, so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->exchange)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
		/* Restore bit#0 set by iommu_init_table() */
		if (tbl->it_offset == 0)
			set_bit(0, tbl->it_map);
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
	/*
	 * The sysfs entries should be populated before
	 * binding the IOMMU group. If the sysfs entries aren't
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (device_iommu_mapped(dev)) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev), iommu_group_id(table_group->group));

	return iommu_group_add_device(table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table and group,
	 * and we needn't detach them from the associated
	 * IOMMU groups.
	 */
	if (!device_iommu_mapped(dev)) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */