/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}


void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction,
		    struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump system goes through clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build a iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
		       node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}