/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
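/*
 * Illustrative sketch (not part of the original file): from a driver's point
 * of view the ownership hand-over described above looks roughly like this,
 * using the generic streaming DMA API which ends up in the helpers below:
 *
 *	dma_addr_t d;
 *
 *	d = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, d))
 *		return -ENOMEM;
 *	// the device now owns the buffer and may DMA into it
 *	start_device_dma(dev, d, len);		// hypothetical driver helper
 *	wait_for_completion(&done);
 *	dma_unmap_page(dev, d, len, DMA_FROM_DEVICE);
 *	// the CPU owns the buffer again; reads see the device's data
 */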
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
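/*
 * Worked example (illustrative, assuming 4 KiB pages, no dma_pfn_offset and
 * RAM starting at physical address 0): a device with a 24-bit coherent mask,
 * 0x00ffffff, can reach PFNs 0x0-0xfff.  With 1 GiB of lowmem, max_dma_pfn is
 * 0x40000, so dma_to_pfn(dev, mask) < max_dma_pfn and __dma_supported()
 * rejects the mask; dma_set_coherent_mask(dev, DMA_BIT_MASK(24)) would fail.
 */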
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
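/*
 * Example (illustrative): a 12 KiB request has get_order(12K) == 2, so
 * alloc_pages() returns a naturally aligned 16 KiB compound of 4 pages.
 * split_page() turns it into 4 independent order-0 pages and the loop above
 * returns the unused 4th page to the allocator, leaving exactly 3 pages
 * (12 KiB) for the DMA buffer.
 */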
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set the VM_USERMAP flag too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;

	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				(void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
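/*
 * Example (illustrative): the pool above backs dma_alloc_coherent() calls
 * made from atomic context, where page tables cannot be remapped.  Its size
 * defaults to 256 KiB and can be overridden on the kernel command line,
 * e.g. "coherent_pool=2M"; memparse() accepts the usual K/M/G suffixes.
 */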
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}
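/*
 * Illustrative sketch (not in the original file): a driver that wants a
 * write-combining rather than strongly uncached mapping could pass the
 * attribute through the dma_*_attrs() variants, e.g.:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *
 *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 *	cpu_addr = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
 *	ret = dma_mmap_attrs(dev, vma, cpu_addr, handle, size, &attrs);
 *
 * __get_dma_pgprot() above then selects pgprot_writecombine() for the
 * kernel and userspace mappings.
 */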
#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 struct dma_attrs *attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool want_vaddr;
	struct arm_dma_buffer *buf;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf), gfp);
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);
	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	if (nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
		addr = __alloc_from_contiguous(dev, size, prot, &page,
					       caller, want_vaddr);
	else if (is_coherent)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!gfpflags_allow_blocking(gfp))
		addr = __alloc_from_pool(size, &page);
	else
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
					    caller, want_vaddr);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return want_vaddr ? addr : page;
}
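/*
 * Example (illustrative) of how the chain of tests above picks a backend:
 * a GFP_KERNEL request on a device with a CMA area goes through
 * __alloc_from_contiguous(); the same request from atomic context (no
 * __GFP_DIRECT_RECLAIM) is served from the preallocated atomic pool via
 * __alloc_from_pool(); a cache-coherent device or a !CONFIG_MMU build takes
 * pages straight from the buddy allocator via __alloc_simple_buffer();
 * everything else is allocated and remapped uncached by
 * __alloc_remap_buffer().
 */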
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
#endif	/* CONFIG_MMU */
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
	struct arm_dma_buffer *buf;

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (nommu()) {
		__dma_free_buffer(page, size);
	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		if (want_vaddr && !is_coherent)
			__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
	}

	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
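/*
 * Example (illustrative) of the resulting maintenance for one mapping:
 * dma_map_page(..., DMA_FROM_DEVICE) ends up in __dma_page_cpu_to_dev(),
 * which performs the direction-specific inner-cache maintenance through
 * dmac_map_area() and invalidates the outer cache, so stale or dirty lines
 * cannot interfere with the device's data while it owns the buffer.
 * dma_unmap_page(..., DMA_FROM_DEVICE) then ends up in
 * __dma_page_dev_to_cpu(), which invalidates once more to discard anything
 * the CPU speculatively prefetched during the transfer.  For DMA_TO_DEVICE
 * the caches are cleaned before the transfer and the unmap side does no
 * cache maintenance.
 */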
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found.  Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };
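/*
 * Example (illustrative): a 1 MiB buffer is 256 pages, so __fls(256) == 8 and
 * the order-9 (2 MiB) step is skipped straight away.  If an order-8 chunk can
 * be allocated, the buffer is satisfied in one piece; under memory pressure
 * the allocator drops to order-4 (64 KiB) chunks and finally to single 4 KiB
 * pages, trading IOMMU mapping granularity for allocation reliability.
 */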
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for the specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	return dma_common_pages_remap(pages, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}
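/*
 * Example (illustrative): if pages[] holds pages with PFNs 100, 101, 102 and
 * 200, the inner loop above merges the first three into a single 12 KiB
 * iommu_map() call and then issues a second 4 KiB call for the last page, so
 * the IO virtual range stays contiguous even though the backing memory is
 * not.
 */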
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}

static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(gfp))
		return __iommu_alloc_atomic(dev, size, handle);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
		return -ENXIO;

	pages += off;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	int prot;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}

	return prot;
}
/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_direction_to_prot(dir);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}
/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}
/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}


/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}
/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
				GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
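/*
 * Illustrative sketch (not in the original file; the base/size values are
 * made up): bus or platform glue code typically creates one mapping per
 * IOMMU instance and then attaches client devices to it, e.g.:
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x00000000, SZ_1G);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 *	// dev now uses iommu_ops / iommu_coherent_ops for its DMA mappings
 */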
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	/*
	 * Cap each bitmap at one page; larger address spaces are covered
	 * by allocating further bitmap pages ("extensions") on demand.
	 */
	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
				GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches the specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);

static void __arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached mapping.
 * This also clears the device's dma operations (dma_map_ops pointer).
 */
void arm_iommu_detach_device(struct device *dev)
{
	__arm_iommu_detach_device(dev);
	set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	__arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif /* CONFIG_ARM_DMA_USE_IOMMU */

static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
	arm_teardown_iommu_dma_ops(dev);
}
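
/*
 * Illustrative usage sketch (not part of the original file): how bus or
 * platform setup code might wire a device to an IOMMU mapping with the
 * exported helpers above, and how the device then uses the ordinary
 * streaming DMA API once iommu_ops is installed.  The base address
 * (0x80000000), window size (SZ_128M) and the assumption that the device
 * sits on platform_bus_type are hypothetical placeholders, not values
 * taken from this file.
 *
 *	struct dma_iommu_mapping *mapping;
 *	dma_addr_t dma;
 *	int err;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err) {
 *		arm_iommu_release_mapping(mapping);
 *		return err;
 *	}
 *
 *	// dev now uses iommu_ops, so dma_map_page() goes through
 *	// arm_iommu_map_page(): an IOVA range is allocated from the
 *	// mapping's bitmap and programmed into the IOMMU domain.
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		goto out_detach;
 *
 *	// ... start the transfer, wait for completion ...
 *
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 *
 * out_detach:
 *	arm_iommu_detach_device(dev);
 *	arm_iommu_release_mapping(mapping);
 *
 * The detach drops the reference taken at attach time and the final
 * release frees the mapping once no users remain.
 */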