/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc = arm_dma_alloc,
	.free = arm_dma_free,
	.mmap = arm_dma_mmap,
	.get_sgtable = arm_dma_get_sgtable,
	.map_page = arm_dma_map_page,
	.unmap_page = arm_dma_unmap_page,
	.map_sg = arm_dma_map_sg,
	.unmap_sg = arm_dma_unmap_sg,
	.sync_single_for_cpu = arm_dma_sync_single_for_cpu,
	.sync_single_for_device = arm_dma_sync_single_for_device,
	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arm_dma_sync_sg_for_device,
	.set_dma_mask = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc = arm_coherent_dma_alloc,
	.free = arm_coherent_dma_free,
	.mmap = arm_dma_mmap,
	.get_sgtable = arm_dma_get_sgtable,
	.map_page = arm_coherent_dma_map_page,
	.map_sg = arm_dma_map_sg,
	.set_dma_mask = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		/*
		 * If the mask allows for more memory than we can address,
		 * and we actually have that much memory, then fail the
		 * allocation.
		 */
		if (sizeof(mask) != sizeof(dma_addr_t) &&
		    mask > (dma_addr_t)~0 &&
		    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
			return 0;
		}

		/*
		 * Now check that the mask, when translated to a PFN,
		 * fits within the allowable addresses which we can
		 * allocate.
		 */
		if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 arm_dma_pfn_limit + 1);
			return 0;
		}
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask. Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer. 'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#warning ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};

static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool.vaddr);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool.size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;

	if (IS_ENABLED(CONFIG_DMA_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
					      atomic_pool_init);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
					   atomic_pool_init);
	if (ptr) {
		int i;

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}

	kfree(pages);
no_pages:
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
#define __alloc_from_pool(size, ret_page) NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
#define __free_from_pool(cpu_addr, size) 0
#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			 size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them. The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_DMA_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages. But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
					       s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	unsigned long limit;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
		return 0;

	/*
	 * Translate the device's DMA mask to a PFN limit. This
	 * PFN number includes the page which we can DMA to.
	 */
	limit = dma_to_pfn(dev, mask);

	if (limit < arm_dma_pfn_limit)
		return 0;

	return 1;
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

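		/*
		 * Higher-order allocations are split into individual pages
		 * so that the excess can be freed page by page and the
		 * whole buffer can be tracked through the pages[] array.
		 */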
		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for the specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
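
/*
 * Illustrative sketch only (not additional API): arm_iommu_alloc_attrs() and
 * arm_iommu_free_attrs() below pair the buffer and IOVA helpers roughly as:
 *
 *	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
 *	iova  = __iommu_create_mapping(dev, pages, size);
 *	...device performs DMA to/from iova...
 *	__iommu_remove_mapping(dev, iova, size);
 *	__iommu_free_buffer(dev, pages, size, attrs);
 */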

static struct page **__atomic_get_pages(void *addr)
{
	struct dma_pool *pool = &atomic_pool;
	struct page **pages = pool->pages;
	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;

	return pages + offs;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}

static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (gfp & GFP_ATOMIC)
		return __iommu_alloc_atomic(dev, size, handle);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them. The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	int prot;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}

	return prot;
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_direction_to_prot(dir);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

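		/*
		 * Segments are merged into a single IOVA chunk for as long
		 * as they remain page-contiguous; a new chunk is started
		 * when the next segment has an in-page offset, the size
		 * accumulated so far is not page aligned, or the device's
		 * maximum segment size would be exceeded.
		 */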
		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
	    is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops iommu_ops = {
	.alloc = arm_iommu_alloc_attrs,
	.free = arm_iommu_free_attrs,
	.mmap = arm_iommu_mmap_attrs,
	.get_sgtable = arm_iommu_get_sgtable,

	.map_page = arm_iommu_map_page,
	.unmap_page = arm_iommu_unmap_page,
	.sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
	.sync_single_for_device = arm_iommu_sync_single_for_device,

	.map_sg = arm_iommu_map_sg,
	.unmap_sg = arm_iommu_unmap_sg,
	.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device = arm_iommu_sync_sg_for_device,

	.set_dma_mask = arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc = arm_iommu_alloc_attrs,
	.free = arm_iommu_free_attrs,
	.mmap = arm_iommu_mmap_attrs,
	.get_sgtable = arm_iommu_get_sgtable,

	.map_page = arm_coherent_iommu_map_page,
	.unmap_page = arm_coherent_iommu_unmap_page,

	.map_sg = arm_coherent_iommu_map_sg,
	.unmap_sg = arm_coherent_iommu_unmap_sg,

	.set_dma_mask = arm_dma_set_mask,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: accuracy of the IO address allocations
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order)
{
	unsigned int count = size >> (PAGE_SHIFT + order);
	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	if (!count)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmap)
		goto err2;

	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;
	mapping->order = order;
	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;

	kref_init(&mapping->kref);
	return mapping;
err3:
	kfree(mapping->bitmap);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kfree(mapping->bitmap);
	kfree(mapping);
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &iommu_ops);

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	dev->archdata.mapping = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

#endif
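
/*
 * Illustrative sketch only, not part of this file: a bus or platform driver
 * that wants per-device IOMMU translation would typically use the API above
 * along these lines (the bus, base, size and order values are examples):
 *
 *	struct dma_iommu_mapping *mapping;
 *	int err;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M, 0);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err) {
 *		arm_iommu_release_mapping(mapping);
 *		return err;
 *	}
 *
 * After a successful attach, the generic dma_map_*() calls on 'dev' are
 * routed through the iommu_ops defined above.
 */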