/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
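
/*
 * Illustrative sketch of the ownership hand-off described above, as a
 * hypothetical driver might perform it (the device, page and direction
 * below are placeholders, not names used by this file):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *
 * At this point the caches have been cleaned and the device owns the
 * buffer; once the transfer has completed:
 *
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 *
 * returns ownership to the CPU, which may then safely read the data.
 */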

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}
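
/*
 * Illustrative sketch of the driver-side counterpart to the checks above
 * (hypothetical probe code, not part of this file): the coherent mask
 * should be set through the DMA API and the return value checked, which
 * is exactly what the warning in __dma_supported() complains about when
 * drivers fail to do so:
 *
 *	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */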

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;

	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
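
/*
 * Notes on the pool above (illustration only): dma_alloc_coherent()
 * calls made from atomic context (GFP_ATOMIC, i.e. without __GFP_WAIT)
 * are served from this pool, see __dma_alloc() below.  Its size may be
 * overridden on the kernel command line, for example:
 *
 *	coherent_pool=4M
 *
 * memparse() in early_coherent_pool() accepts the usual K/M/G suffixes.
 */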

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu()	0

#else	/* !CONFIG_MMU */

#define nommu()	1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}


static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 struct dma_attrs *attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool want_vaddr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);
	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!dev_get_cma_area(dev))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);

	if (page)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return want_vaddr ? addr : page;
}
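
/*
 * Illustrative sketch of how a hypothetical driver reaches the attribute
 * handling above (not code used by this file): DMA_ATTR_WRITE_COMBINE
 * requests a writecombining rather than a coherent kernel mapping, and
 * DMA_ATTR_NO_KERNEL_MAPPING skips the kernel remap entirely:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 *	cpu_addr = dma_alloc_attrs(dev, SZ_64K, &dma, GFP_KERNEL, &attrs);
 *	...
 *	dma_free_attrs(dev, SZ_64K, cpu_addr, dma, &attrs);
 */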

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
#endif	/* CONFIG_MMU */
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
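
/*
 * Illustrative sketch of a hypothetical driver's .mmap file operation
 * built on the helper above (not code used by this file); "foo_priv"
 * stands in for a driver structure holding the values returned by
 * dma_alloc_coherent():
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */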

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		if (want_vaddr)
			__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
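
/*
 * Illustrative sketch of the driver-visible interface to the two helpers
 * above (hypothetical code, not part of this file): a driver that needs
 * to look at a streaming buffer while it remains mapped must bracket the
 * CPU access with the dma_sync_single_*() calls, which end up here:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads or writes the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */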

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
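
/*
 * Illustrative sketch of the scatter-gather interface exercised above
 * (hypothetical driver code, not part of this file; foo_hw_queue_desc()
 * is a made-up stand-in for whatever the device needs):
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		foo_hw_queue_desc(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note that the loop runs over the returned count, while dma_unmap_sg()
 * takes the original nents.
 */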

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found.  Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
	{
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		for (order = __fls(count); order > 0; --order) {
			/*
			 * We do not want OOM killer to be invoked as long
			 * as we can fall back to single pages, so we force
			 * __GFP_NORETRY for orders higher than zero.
			 */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
			if (pages[i])
				break;
		}

		if (!pages[i]) {
			/*
			 * Fall back to single page allocation.
			 * Might invoke OOM killer as last resort.
			 */
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}
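
/*
 * Worked example of the opportunistic allocation above (illustration
 * only): for a 68 KiB buffer, count starts at 17 pages, so the first
 * pass tries order __fls(17) = 4 (16 pages) downwards with
 * __GFP_NORETRY; once a high-order block succeeds it is split into
 * individual pages, and the remaining single page is allocated with a
 * plain order-0 request that may try harder (and, as noted above, may
 * invoke the OOM killer as a last resort).
 */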

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for the specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	return dma_common_pages_remap(pages, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				IOMMU_READ|IOMMU_WRITE);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}

static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (!(gfp & __GFP_WAIT))
		return __iommu_alloc_atomic(dev, size, handle);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * Free a buffer as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

static int __dma_direction_to_prot(enum dma_data_direction dir)
{
	int prot;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		prot = IOMMU_READ | IOMMU_WRITE;
		break;
	case DMA_TO_DEVICE:
		prot = IOMMU_READ;
		break;
	case DMA_FROM_DEVICE:
		prot = IOMMU_WRITE;
		break;
	default:
		prot = 0;
	}

	return prot;
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_direction_to_prot(dir);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
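
/*
 * Worked example of the merging above (illustration only): three 4 KiB
 * entries, each starting at offset 0, accumulate into a single 12 KiB
 * chunk that __map_sg_chunk() maps to one contiguous IO virtual address
 * range (the backing pages need not be physically contiguous), so the
 * caller sees a single dma address/length pair.  A new chunk is started
 * as soon as an entry has a non-zero offset, the running size is not
 * page aligned, or adding the entry would exceed
 * dma_get_max_seg_size(dev).
 */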

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}


/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
				GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
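
/*
 * Illustrative sketch of how a hypothetical IOMMU-backed bus or driver
 * would use the exported helpers above (not code used by this file; the
 * base address and window size are arbitrary example values):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_256M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 *
 * From this point on, dma_alloc_coherent() and dma_map_*() calls on the
 * device go through the IOMMU aware dma_map_ops installed above.
 */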

static void __arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 */
void arm_iommu_detach_device(struct device *dev)
{
	__arm_iommu_detach_device(dev);
	set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	__arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */

static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
	arm_teardown_iommu_dma_ops(dev);
}