// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
#include <xen/swiotlb-xen.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL	    0
#define COHERENT    1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

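/*
 * Illustrative driver-side sketch of the ownership protocol described
 * above (an assumed example, not code used by this file; 'dev' and
 * 'page' come from the caller's context, start_device_dma() is a
 * hypothetical helper):
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	start_device_dma(dev, dma, PAGE_SIZE);
 *	...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *
 * Between map and unmap the device owns the buffer and the CPU must not
 * touch it; the unmap (or dma_sync_single_for_cpu()) hands it back.
 */
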
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static int arm_dma_supported(struct device *dev, u64 mask)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	return dma_to_pfn(dev, mask) >= max_dma_pfn;
}

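/*
 * Worked example for the check above (an illustrative sketch, not code
 * used by this file): a device that can only drive 24 address lines
 * would advertise its limit from its probe() routine with
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 *
 * dma_supported() then lands here with mask = 0x00ffffff; the mask is
 * translated to the highest reachable PFN, and it is accepted only if
 * it covers every page the kernel may hand out for DMA.
 */
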
const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.map_resource		= dma_direct_map_resource,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.map_resource		= dma_direct_map_resource,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

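/*
 * For example, booting with 'coherent_pool=1M' on the kernel command
 * line quadruples the default 256 KiB atomic pool; memparse() accepts
 * the usual K/M/G suffixes.  The pool must be sized at boot because it
 * backs coherent allocations made from atomic context for non-coherent
 * devices, where the allocators below cannot block to remap memory.
 */
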
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;

	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;

	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

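/*
 * A driver that wants a write-combined rather than an uncached buffer
 * asks for it via the attrs argument of the generic API, e.g. (an
 * illustrative sketch, not code used by this file):
 *
 *	void *cpu = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *				    DMA_ATTR_WRITE_COMBINE);
 *
 * The attribute ends up here and selects pgprot_writecombine() for both
 * the kernel remap and any later userspace mmap of the buffer.
 */
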
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;

	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

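/*
 * Summary of the allocator selection above, in precedence order:
 *
 *	context			coherent dev	allocator
 *	------------------------------------------------------
 *	may block, CMA area	either		cma_allocator
 *	any remaining		yes		simple_allocator
 *	may block, no CMA	no		remap_allocator
 *	atomic			no		pool_allocator
 *
 * i.e. CMA wins whenever blocking is allowed and a CMA area exists;
 * otherwise coherent devices take plain pages, and non-coherent ones
 * either remap (blocking) or fall back to the boot-time atomic pool.
 */
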
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

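/*
 * A character device driver would typically reach arm_dma_mmap()
 * through the generic wrapper from its own mmap handler, e.g. (an
 * illustrative sketch; 'cpu_addr', 'handle' and 'size' are assumed to
 * come from an earlier dma_alloc_coherent()):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, cpu_addr, handle, size);
 *	}
 */
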
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	unsigned long pfn = dma_to_pfn(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
	if (!pfn_valid(pfn))
		return -ENXIO;

	page = pfn_to_page(pfn);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

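/*
 * Driver-side usage sketch for the scatter-gather path (illustrative
 * only; program_hw() is a hypothetical device-programming helper):
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_hw(dev, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note that the unmap takes the original nents, not the value returned
 * by dma_map_sg().
 */
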
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	/*
	 * When CONFIG_ARM_LPAE is set, physical address can extend above
	 * 32-bits, which then can't be addressed by devices that only
	 * support 32-bit DMA.
	 * Use the generic dma-direct / swiotlb ops code in that case, as that
	 * handles bounce buffering for us.
	 */
	if (IS_ENABLED(CONFIG_ARM_LPAE))
		return NULL;
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

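/*
 * Worked example for __alloc_iova() (illustrative, assuming
 * CONFIG_ARM_DMA_IOMMU_ALIGNMENT=8): a 64 KiB request gives order = 4,
 * so count = 16 pages and align = 0xf, i.e. the bitmap search returns a
 * 16-page aligned IOVA.  A 2 MiB request (order 9) has its alignment
 * clamped to order 8, so it is only placed on a 1 MiB boundary.
 */
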
/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order,
						 gfp & __GFP_NOWARN);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

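/*
 * Worked example for the loop above (illustrative, assuming the
 * opportunistic high-order allocations succeed): for a 300-page buffer,
 * order 9 is skipped because __fls(300) = 8, one order-8 chunk covers
 * 256 pages, two order-4 chunks cover 32 more, and the remaining 12
 * pages are taken as single pages: 256 + 16 + 16 + 12 = 300.
 */
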
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	return dma_common_find_pages(cpu_addr);
}

static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}

static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
	    int coherent_flag)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				      __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}

static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}

static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int err;

	if (!pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages)
		return -ENXIO;

	err = vm_map_pages(vma, pages, nr_pages);
	if (err)
		pr_err("Remapping memory failed: %d\n", err);

	return err;
}

static int arm_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

static int arm_coherent_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * Free a buffer as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
	struct page **pages;

	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		dma_common_free_remap(cpu_addr, size);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static void arm_iommu_free_attrs(struct device *dev, size_t size,
				 void *cpu_addr, dma_addr_t handle,
				 unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}

static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_MAPPING_ERROR;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_MAPPING_ERROR)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

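/*
 * __iommu_map_sg() below feeds this helper with maximal runs of sg
 * entries.  For example (illustrative): three page-sized entries with
 * zero in-page offsets are merged into a single 12 KiB chunk and come
 * back as one IOVA range, even if the backing pages are physically
 * scattered; a new chunk is only started when an entry has a non-zero
 * offset, the accumulated size is not page aligned, or the device's
 * maximum segment size would be exceeded.
 */
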
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, unsigned long attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs, bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_coherent_iommu_unmap_sg(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_iommu_unmap_sg(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_cpu(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot;
	phys_addr_t addr = phys_addr & PAGE_MASK;
	unsigned int offset = phys_addr & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address to resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = dma_handle & PAGE_MASK;
	unsigned int offset = dma_handle & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address to resource
 * @size: size of resource to unmap (same as was passed to arm_iommu_map_resource)
 * @dir: DMA transfer direction
 */
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = dma_handle & PAGE_MASK;
	unsigned int offset = dma_handle & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

static const struct dma_map_ops iommu_ops = {
	.alloc			= arm_iommu_alloc_attrs,
	.free			= arm_iommu_free_attrs,
	.mmap			= arm_iommu_mmap_attrs,
	.get_sgtable		= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,

	.dma_supported		= arm_dma_supported,
};

static const struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_coherent_iommu_alloc_attrs,
	.free		= arm_coherent_iommu_free_attrs,
	.mmap		= arm_coherent_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.map_resource	= arm_iommu_map_resource,
	.unmap_resource	= arm_iommu_unmap_resource,

	.dma_supported	= arm_dma_supported,
};
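
/*
 * Editorial note: the coherent table above intentionally lacks the
 * sync_{single,sg}_for_{cpu,device} hooks.  The generic DMA API only
 * calls a hook when it is non-NULL, so for a cache-coherent device a
 * sync such as
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *
 * is effectively a no-op, while the same driver code on a non-coherent
 * device (iommu_ops) performs the required cache invalidation.
 */
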
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
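
/*
 * Editorial usage sketch.  The bus type, IOVA base and window size below
 * are assumptions for illustration, not values taken from this file:
 *
 *	struct dma_iommu_mapping *mapping;
 *	int err;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err) {
 *		arm_iommu_release_mapping(mapping);
 *		return err;
 *	}
 *
 * After a successful attach, every streaming or coherent DMA mapping
 * made for 'dev' is allocated out of the 128 MiB IOVA window starting
 * at 0x80000000.
 */
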
/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;
	set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
			size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
			dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif /* CONFIG_ARM_DMA_USE_IOMMU */
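
/*
 * Editorial note: arch_setup_dma_ops() below is not called by drivers.
 * The firmware glue invokes it while a device is being configured;
 * roughly, in of_dma_configure() in drivers/of/device.c (a simplified
 * sketch, details vary between kernel versions):
 *
 *	of_dma_get_range(np, &dma_addr, &paddr, &size);
 *	coherent = of_dma_is_coherent(np);
 *	iommu = of_iommu_configure(dev, np);
 *	arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
 *
 * so the "dma-ranges", "dma-coherent" and IOMMU properties in the
 * device tree ultimately select between the ops tables defined above.
 */
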
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
#ifdef CONFIG_SWIOTLB
	dev->dma_coherent = coherent;
#endif

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set; remove this
	 * check once all other callers of set_dma_ops() have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
		dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
	dev->archdata.dma_ops_setup = true;
}

void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
	set_dma_ops(dev, NULL);
}

#ifdef CONFIG_SWIOTLB
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		     gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, dma_handle, gfp,
			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
			   attrs, __builtin_return_address(0));
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
#endif /* CONFIG_SWIOTLB */
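
/*
 * Editorial closing note: with CONFIG_SWIOTLB the four hooks above are
 * the ARM back end for the generic direct-mapping code.  A driver still
 * uses only the portable API, e.g. (a sketch):
 *
 *	void *cpu;
 *	dma_addr_t handle;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, handle);
 *
 * which reaches arch_dma_alloc()/arch_dma_free() only when no dma_map_ops
 * are installed for the device and it is not DMA coherent.
 */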