/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
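
/*
 * Layout of the GART PTE built by GPTE_ENCODE() above: physical address bits
 * 31:12 land in PTE bits 31:12, physical bits 39:32 in PTE bits 11:4, bit 0
 * is the valid bit and bit 1 the coherent bit. Worked example: encoding
 * phys 0x12_3456_7000 gives 0x34567000 | 0x120 | 0x3 = 0x34567123, and
 * GPTE_DECODE(0x34567123) recovers 0x12_3456_7000. Only eight high address
 * bits fit, which is why remapping is limited to physical addresses below
 * 1TB (see GART_MAX_PHYS_ADDR below).
 */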

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}
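
/*
 * In other words: the GART remap path is taken only when force_iommu is set
 * or dma_capable() reports that the device cannot reach the buffer directly,
 * e.g. a device with a 32-bit DMA mask handed a buffer above 4GB. Buffers the
 * device can already address are passed through untranslated.
 */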

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
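
/*
 * Example of the page math above: an 8KiB buffer at physical 0x1_2345_6800
 * covers three 4KiB pages (0x800 offset + 0x2000 length), so three
 * consecutive GART entries are programmed and the returned bus address is
 * iommu_bus_base + iommu_page * PAGE_SIZE + 0x800. The in-page offset
 * survives the loop because adding PAGE_SIZE never changes the low 12 bits
 * of phys_mem.
 */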

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr == DMA_MAPPING_ERROR ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sg;
	sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged, try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = DMA_MAPPING_ERROR;
	return 0;
}
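
/*
 * Merging example for gart_map_sg() above: with iommu_merge enabled, two
 * page-aligned 4KiB chunks that both need remapping become one 8KiB DMA
 * segment (a single dma_address/dma_length pair) backed by consecutive GART
 * entries. A chunk that starts at a non-zero page offset, follows a chunk
 * that does not end on a page boundary, or would push the segment past
 * dma_get_max_seg_size() starts a new segment instead.
 */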

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
				 DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}
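
/*
 * Note on the align_mask passed above: (1UL << get_order(size)) - 1 is a
 * mask in GART pages, so the aperture slot is aligned to the allocation's
 * own power-of-two size; a 16KiB coherent buffer (order 2) ends up with a
 * bus address aligned to 16KiB, presumably to preserve the natural alignment
 * of the underlying page allocation.
 */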

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
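
/*
 * Register decoding in read_aperture() above: the base register holds
 * physical address bits 39:25 (32MB granularity), so a base field of 0x40
 * decodes to 0x40 << 25 = 2GB, and an aperture order of 3 gives
 * 32MB << 3 = 256MB. Empty apertures and apertures reaching past 4GB are
 * rejected, since the remapped bus addresses are meant to stay below 4GB
 * for 32-bit devices.
 */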

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume = gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size>>10);

	return 0;

nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}
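
/*
 * Sizing example for init_amd_gatt() above: the GATT holds one 32-bit entry
 * per 4KiB aperture page, so a 256MiB aperture needs 65536 entries, i.e. a
 * 256KiB table. The table is made uncacheable with set_memory_uc() because,
 * as noted in gart_iommu_init() below, there is no full cache coherency
 * across the GART remapping.
 */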

static const struct dma_map_ops gart_dma_ops = {
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
	.map_page = gart_map_page,
	.unmap_page = gart_unmap_page,
	.alloc = gart_alloc_coherent,
	.free = gart_free_coherent,
	.dma_supported = dma_direct_supported,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shutdown it if there is AGP installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size = info.aper_size << 20;
	aper_base = info.aper_base;
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved = iommu_size;
	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}
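
/*
 * gart_parse_options() below consumes the GART specific suboptions of the
 * iommu= boot parameter. An illustrative command line would be
 * "iommu=memaper=2,fullflush": memaper=2 requests a fallback aperture of
 * order 2, and fullflush makes every mapping flush the GART TLB. A leading
 * number sets iommu_size, and "noaperture" clears fix_aperture so the
 * aperture is left untouched.
 */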

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);