// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/xen/xen-ops.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL		0
#define COHERENT	1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

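/*
 * Look up the bookkeeping entry recorded for a CPU address returned by
 * __dma_alloc() and, if found, unlink it from arm_dma_bufs so the caller
 * can release it.
 */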
static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

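/*
 * The default pool size above can be overridden on the kernel command line,
 * e.g. "coherent_pool=2M"; memparse() below accepts the usual K/M/G suffixes.
 */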
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true, NORMAL,
					      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

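/*
 * Bookkeeping for CMA areas reserved early during boot:
 * dma_contiguous_early_fixup() records each area so that
 * dma_contiguous_remap() can later rebuild its lowmem mapping.
 */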
#ifdef CONFIG_CMA_AREAS
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

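/*
 * Rebuild the kernel lowmem mapping of each recorded CMA area using
 * MT_MEMORY_DMA_READY, so that the attributes of individual pages can
 * later be changed by __dma_remap() when buffers are allocated.
 */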
void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
#endif

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

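/*
 * Allocate from the pre-mapped atomic pool; this path is usable from
 * contexts that cannot sleep, since no new kernel mapping is created here.
 */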
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

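/*
 * __dma_alloc() below selects one of the four allocators: CMA when blocking
 * is allowed and a CMA area is available, the simple page allocator for
 * coherent devices, the remapping allocator when blocking is allowed, and
 * the atomic pool otherwise.
 */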
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : NULL;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = phys_to_dma(dev, page_to_phys(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

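/*
 * Apply a cache maintenance operation to a buffer page by page, so that
 * highmem pages can be temporarily mapped while they are processed.
 */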
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

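/*
 * Transfer ownership of an area back to the CPU: invalidate any cache lines
 * that may have been speculatively fetched while the device owned it.
 */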
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

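/*
 * Carve an IOVA range out of the mapping's allocation bitmaps. The range is
 * aligned to its own size, with the alignment order capped by
 * CONFIG_ARM_DMA_IOMMU_ALIGNMENT.
 */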
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

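/*
 * Allocate the backing pages for an IOMMU-mapped buffer as an array of
 * struct page pointers; the pages do not need to be physically contiguous
 * unless DMA_ATTR_FORCE_CONTIGUOUS is set.
 */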
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
	{
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order,
						 gfp & __GFP_NOWARN);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}

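/*
 * Note on the helper below: runs of physically contiguous pages in the
 * pages[] array are coalesced and mapped with a single iommu_map() call.
 */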
/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

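/*
 * Recover the pages[] array for a previously allocated buffer: atomic pool
 * allocations are translated through the gen_pool, DMA_ATTR_NO_KERNEL_MAPPING
 * allocations return the array directly, and remapped buffers are looked up
 * via dma_common_find_pages().
 */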
static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	return dma_common_find_pages(cpu_addr);
}

static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}

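/*
 * IOMMU-backed coherent allocation: coherent devices and non-blocking callers
 * take the simple/atomic path above; otherwise a page array is allocated,
 * mapped into the device's IOVA space and, unless DMA_ATTR_NO_KERNEL_MAPPING
 * is set, remapped into the kernel as well.
 */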
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;
	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				      __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int err;

	if (!pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages)
		return -ENXIO;

	if (!dev->dma_coherent)
		vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	err = vm_map_pages(vma, pages, nr_pages);
	if (err)
		pr_err("Remapping memory failed: %d\n", err);

	return err;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
COHERENT : NORMAL; 1162836bfa0dSYoungJun Cho struct page **pages; 11634ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 11644ce63fcdSMarek Szyprowski 116556506822SGregory CLEMENT if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { 116656506822SGregory CLEMENT __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); 1167479ed93aSHiroshi Doyu return; 1168479ed93aSHiroshi Doyu } 1169479ed93aSHiroshi Doyu 1170836bfa0dSYoungJun Cho pages = __iommu_get_pages(cpu_addr, attrs); 1171836bfa0dSYoungJun Cho if (!pages) { 1172836bfa0dSYoungJun Cho WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1173836bfa0dSYoungJun Cho return; 1174836bfa0dSYoungJun Cho } 1175836bfa0dSYoungJun Cho 1176fe9041c2SChristoph Hellwig if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) 117751231740SChristoph Hellwig dma_common_free_remap(cpu_addr, size); 1178e9da6e99SMarek Szyprowski 11794ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size); 1180549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 11814ce63fcdSMarek Szyprowski } 11824ce63fcdSMarek Szyprowski 1183dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 1184dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, 118500085f1eSKrzysztof Kozlowski size_t size, unsigned long attrs) 1186dc2832e1SMarek Szyprowski { 1187dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1188dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1189dc2832e1SMarek Szyprowski 1190dc2832e1SMarek Szyprowski if (!pages) 1191dc2832e1SMarek Szyprowski return -ENXIO; 1192dc2832e1SMarek Szyprowski 1193dc2832e1SMarek Szyprowski return sg_alloc_table_from_pages(sgt, pages, count, 0, size, 1194dc2832e1SMarek Szyprowski GFP_KERNEL); 11954ce63fcdSMarek Szyprowski } 11964ce63fcdSMarek Szyprowski 11974ce63fcdSMarek Szyprowski /* 11984ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 11994ce63fcdSMarek Szyprowski */ 12004ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 12014ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 1202d563bccfSRobin Murphy enum dma_data_direction dir, unsigned long attrs) 12034ce63fcdSMarek Szyprowski { 120489cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 12054ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 12064ce63fcdSMarek Szyprowski int ret = 0; 12074ce63fcdSMarek Szyprowski unsigned int count; 12084ce63fcdSMarek Szyprowski struct scatterlist *s; 1209c9b24996SAndreas Herrmann int prot; 12104ce63fcdSMarek Szyprowski 12114ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 121272fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 12134ce63fcdSMarek Szyprowski 12144ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 121572fd97bfSChristoph Hellwig if (iova == DMA_MAPPING_ERROR) 12164ce63fcdSMarek Szyprowski return -ENOMEM; 12174ce63fcdSMarek Szyprowski 12184ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 12193e6110fdSDan Williams phys_addr_t phys = page_to_phys(sg_page(s)); 12204ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 12214ce63fcdSMarek Szyprowski 1222d563bccfSRobin Murphy if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 12234ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 12244ce63fcdSMarek Szyprowski 
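		/*
		 * __dma_info_to_prot() turns the DMA direction and attrs into
		 * IOMMU protection flags; each physically contiguous chunk is
		 * then mapped at the next offset inside the IOVA range that
		 * was reserved with __alloc_iova() above.
		 */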
12257d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs); 1226c9b24996SAndreas Herrmann 1227c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, prot); 12284ce63fcdSMarek Szyprowski if (ret < 0) 12294ce63fcdSMarek Szyprowski goto fail; 12304ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 12314ce63fcdSMarek Szyprowski iova += len; 12324ce63fcdSMarek Szyprowski } 12334ce63fcdSMarek Szyprowski *handle = iova_base; 12344ce63fcdSMarek Szyprowski 12354ce63fcdSMarek Szyprowski return 0; 12364ce63fcdSMarek Szyprowski fail: 12374ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 12384ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 12394ce63fcdSMarek Szyprowski return ret; 12404ce63fcdSMarek Szyprowski } 12414ce63fcdSMarek Szyprowski 1242d563bccfSRobin Murphy /** 1243d563bccfSRobin Murphy * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 1244d563bccfSRobin Murphy * @dev: valid struct device pointer 1245d563bccfSRobin Murphy * @sg: list of buffers 1246d563bccfSRobin Murphy * @nents: number of buffers to map 1247d563bccfSRobin Murphy * @dir: DMA transfer direction 1248d563bccfSRobin Murphy * 1249d563bccfSRobin Murphy * Map a set of buffers described by scatterlist in streaming mode for DMA. 1250d563bccfSRobin Murphy * The scatter gather list elements are merged together (if possible) and 1251d563bccfSRobin Murphy * tagged with the appropriate dma address and length. They are obtained via 1252d563bccfSRobin Murphy * sg_dma_{address,length}. 1253d563bccfSRobin Murphy */ 1254d563bccfSRobin Murphy static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 1255d563bccfSRobin Murphy int nents, enum dma_data_direction dir, unsigned long attrs) 12564ce63fcdSMarek Szyprowski { 12574ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 12586506932bSMartin Oliveira int i, count = 0, ret; 12594ce63fcdSMarek Szyprowski unsigned int offset = s->offset; 12604ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 12614ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 12624ce63fcdSMarek Szyprowski 12634ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 12644ce63fcdSMarek Szyprowski s = sg_next(s); 12654ce63fcdSMarek Szyprowski 12664ce63fcdSMarek Szyprowski s->dma_length = 0; 12674ce63fcdSMarek Szyprowski 12684ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 12696506932bSMartin Oliveira ret = __map_sg_chunk(dev, start, size, 1270d563bccfSRobin Murphy &dma->dma_address, dir, attrs); 12716506932bSMartin Oliveira if (ret < 0) 12724ce63fcdSMarek Szyprowski goto bad_mapping; 12734ce63fcdSMarek Szyprowski 12744ce63fcdSMarek Szyprowski dma->dma_address += offset; 12754ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 12764ce63fcdSMarek Szyprowski 12774ce63fcdSMarek Szyprowski size = offset = s->offset; 12784ce63fcdSMarek Szyprowski start = s; 12794ce63fcdSMarek Szyprowski dma = sg_next(dma); 12804ce63fcdSMarek Szyprowski count += 1; 12814ce63fcdSMarek Szyprowski } 12824ce63fcdSMarek Szyprowski size += s->length; 12834ce63fcdSMarek Szyprowski } 1284d563bccfSRobin Murphy ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs); 12856506932bSMartin Oliveira if (ret < 0) 12864ce63fcdSMarek Szyprowski goto bad_mapping; 12874ce63fcdSMarek Szyprowski 12884ce63fcdSMarek Szyprowski dma->dma_address += offset; 12894ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 12904ce63fcdSMarek Szyprowski 
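	/*
	 * Only chunks closed inside the loop bump 'count'; the final chunk
	 * mapped just above is not counted there, so the number of DMA
	 * segments handed back to the caller is count + 1.
	 */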
12914ce63fcdSMarek Szyprowski return count+1; 12924ce63fcdSMarek Szyprowski 12934ce63fcdSMarek Szyprowski bad_mapping: 12944ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 12954ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 12966506932bSMartin Oliveira if (ret == -ENOMEM) 12976506932bSMartin Oliveira return ret; 12986506932bSMartin Oliveira return -EINVAL; 12994ce63fcdSMarek Szyprowski } 13004ce63fcdSMarek Szyprowski 13014ce63fcdSMarek Szyprowski /** 13024ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 13034ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 13044ce63fcdSMarek Szyprowski * @sg: list of buffers 13054ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 13064ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 13074ce63fcdSMarek Szyprowski * 13084ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 13094ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 13104ce63fcdSMarek Szyprowski */ 131117fe8684SBen Dooks static void arm_iommu_unmap_sg(struct device *dev, 131217fe8684SBen Dooks struct scatterlist *sg, int nents, 131300085f1eSKrzysztof Kozlowski enum dma_data_direction dir, 131400085f1eSKrzysztof Kozlowski unsigned long attrs) 13154ce63fcdSMarek Szyprowski { 1316d563bccfSRobin Murphy struct scatterlist *s; 1317d563bccfSRobin Murphy int i; 1318d563bccfSRobin Murphy 1319d563bccfSRobin Murphy for_each_sg(sg, s, nents, i) { 1320d563bccfSRobin Murphy if (sg_dma_len(s)) 1321d563bccfSRobin Murphy __iommu_remove_mapping(dev, sg_dma_address(s), 1322d563bccfSRobin Murphy sg_dma_len(s)); 1323d563bccfSRobin Murphy if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 1324d563bccfSRobin Murphy __dma_page_dev_to_cpu(sg_page(s), s->offset, 1325d563bccfSRobin Murphy s->length, dir); 1326d563bccfSRobin Murphy } 13274ce63fcdSMarek Szyprowski } 13284ce63fcdSMarek Szyprowski 13294ce63fcdSMarek Szyprowski /** 13304ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 13314ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 13324ce63fcdSMarek Szyprowski * @sg: list of buffers 13334ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 13344ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 13354ce63fcdSMarek Szyprowski */ 133617fe8684SBen Dooks static void arm_iommu_sync_sg_for_cpu(struct device *dev, 133717fe8684SBen Dooks struct scatterlist *sg, 13384ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 13394ce63fcdSMarek Szyprowski { 13404ce63fcdSMarek Szyprowski struct scatterlist *s; 13414ce63fcdSMarek Szyprowski int i; 13424ce63fcdSMarek Szyprowski 13434136ce90SRobin Murphy if (dev->dma_coherent) 13444136ce90SRobin Murphy return; 13454136ce90SRobin Murphy 13464ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 13474ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 13484ce63fcdSMarek Szyprowski 13494ce63fcdSMarek Szyprowski } 13504ce63fcdSMarek Szyprowski 13514ce63fcdSMarek Szyprowski /** 13524ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 13534ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 13544ce63fcdSMarek Szyprowski * @sg: list of buffers 13554ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 13564ce63fcdSMarek Szyprowski * 
@dir: DMA transfer direction (same as was passed to dma_map_sg) 13574ce63fcdSMarek Szyprowski */ 135817fe8684SBen Dooks static void arm_iommu_sync_sg_for_device(struct device *dev, 135917fe8684SBen Dooks struct scatterlist *sg, 13604ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 13614ce63fcdSMarek Szyprowski { 13624ce63fcdSMarek Szyprowski struct scatterlist *s; 13634ce63fcdSMarek Szyprowski int i; 13644ce63fcdSMarek Szyprowski 13654136ce90SRobin Murphy if (dev->dma_coherent) 13664136ce90SRobin Murphy return; 13674136ce90SRobin Murphy 13684ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 13694ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 13704ce63fcdSMarek Szyprowski } 13714ce63fcdSMarek Szyprowski 13724ce63fcdSMarek Szyprowski /** 1373d563bccfSRobin Murphy * arm_iommu_map_page 13740fa478dfSRob Herring * @dev: valid struct device pointer 13750fa478dfSRob Herring * @page: page that buffer resides in 13760fa478dfSRob Herring * @offset: offset into page for start of buffer 13770fa478dfSRob Herring * @size: size of buffer to map 13780fa478dfSRob Herring * @dir: DMA transfer direction 13790fa478dfSRob Herring * 1380d563bccfSRobin Murphy * IOMMU aware version of arm_dma_map_page() 13810fa478dfSRob Herring */ 1382d563bccfSRobin Murphy static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 13830fa478dfSRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 138400085f1eSKrzysztof Kozlowski unsigned long attrs) 13850fa478dfSRob Herring { 138689cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13870fa478dfSRob Herring dma_addr_t dma_addr; 138813987d68SWill Deacon int ret, prot, len = PAGE_ALIGN(size + offset); 13890fa478dfSRob Herring 1390d563bccfSRobin Murphy if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 1391d563bccfSRobin Murphy __dma_page_cpu_to_dev(page, offset, size, dir); 1392d563bccfSRobin Murphy 13930fa478dfSRob Herring dma_addr = __alloc_iova(mapping, len); 139472fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 13950fa478dfSRob Herring return dma_addr; 13960fa478dfSRob Herring 13977d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs); 139813987d68SWill Deacon 139913987d68SWill Deacon ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 14000fa478dfSRob Herring if (ret < 0) 14010fa478dfSRob Herring goto fail; 14020fa478dfSRob Herring 14030fa478dfSRob Herring return dma_addr + offset; 14040fa478dfSRob Herring fail: 14050fa478dfSRob Herring __free_iova(mapping, dma_addr, len); 140672fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 14070fa478dfSRob Herring } 14080fa478dfSRob Herring 14090fa478dfSRob Herring /** 14104ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 14114ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 14124ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 14134ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 14144ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 14154ce63fcdSMarek Szyprowski * 14164ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 14174ce63fcdSMarek Szyprowski */ 14184ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 141900085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 14204ce63fcdSMarek Szyprowski { 142189cfdb19SWill Deacon struct dma_iommu_mapping *mapping = 
to_dma_iommu_mapping(dev); 14224ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 1423d563bccfSRobin Murphy struct page *page; 14244ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 14254ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 14264ce63fcdSMarek Szyprowski 14274ce63fcdSMarek Szyprowski if (!iova) 14284ce63fcdSMarek Szyprowski return; 14294ce63fcdSMarek Szyprowski 1430d563bccfSRobin Murphy if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { 1431d563bccfSRobin Murphy page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 14324ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 1433d563bccfSRobin Murphy } 14344ce63fcdSMarek Szyprowski 14354ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 14364ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 14374ce63fcdSMarek Szyprowski } 14384ce63fcdSMarek Szyprowski 143924ed5d2cSNiklas Söderlund /** 144024ed5d2cSNiklas Söderlund * arm_iommu_map_resource - map a device resource for DMA 144124ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 144224ed5d2cSNiklas Söderlund * @phys_addr: physical address of resource 144324ed5d2cSNiklas Söderlund * @size: size of resource to map 144424ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 144524ed5d2cSNiklas Söderlund */ 144624ed5d2cSNiklas Söderlund static dma_addr_t arm_iommu_map_resource(struct device *dev, 144724ed5d2cSNiklas Söderlund phys_addr_t phys_addr, size_t size, 144824ed5d2cSNiklas Söderlund enum dma_data_direction dir, unsigned long attrs) 144924ed5d2cSNiklas Söderlund { 145024ed5d2cSNiklas Söderlund struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 145124ed5d2cSNiklas Söderlund dma_addr_t dma_addr; 145224ed5d2cSNiklas Söderlund int ret, prot; 145324ed5d2cSNiklas Söderlund phys_addr_t addr = phys_addr & PAGE_MASK; 145424ed5d2cSNiklas Söderlund unsigned int offset = phys_addr & ~PAGE_MASK; 145524ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 145624ed5d2cSNiklas Söderlund 145724ed5d2cSNiklas Söderlund dma_addr = __alloc_iova(mapping, len); 145872fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 145924ed5d2cSNiklas Söderlund return dma_addr; 146024ed5d2cSNiklas Söderlund 14617d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; 146224ed5d2cSNiklas Söderlund 146324ed5d2cSNiklas Söderlund ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); 146424ed5d2cSNiklas Söderlund if (ret < 0) 146524ed5d2cSNiklas Söderlund goto fail; 146624ed5d2cSNiklas Söderlund 146724ed5d2cSNiklas Söderlund return dma_addr + offset; 146824ed5d2cSNiklas Söderlund fail: 146924ed5d2cSNiklas Söderlund __free_iova(mapping, dma_addr, len); 147072fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 147124ed5d2cSNiklas Söderlund } 147224ed5d2cSNiklas Söderlund 147324ed5d2cSNiklas Söderlund /** 147424ed5d2cSNiklas Söderlund * arm_iommu_unmap_resource - unmap a device DMA resource 147524ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 147624ed5d2cSNiklas Söderlund * @dma_handle: DMA address to resource 147724ed5d2cSNiklas Söderlund * @size: size of resource to map 147824ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 147924ed5d2cSNiklas Söderlund */ 148024ed5d2cSNiklas Söderlund static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle, 148124ed5d2cSNiklas Söderlund size_t size, enum dma_data_direction dir, 148224ed5d2cSNiklas Söderlund unsigned long attrs) 148324ed5d2cSNiklas Söderlund { 148424ed5d2cSNiklas Söderlund 
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 148524ed5d2cSNiklas Söderlund dma_addr_t iova = dma_handle & PAGE_MASK; 148624ed5d2cSNiklas Söderlund unsigned int offset = dma_handle & ~PAGE_MASK; 148724ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 148824ed5d2cSNiklas Söderlund 148924ed5d2cSNiklas Söderlund if (!iova) 149024ed5d2cSNiklas Söderlund return; 149124ed5d2cSNiklas Söderlund 149224ed5d2cSNiklas Söderlund iommu_unmap(mapping->domain, iova, len); 149324ed5d2cSNiklas Söderlund __free_iova(mapping, iova, len); 149424ed5d2cSNiklas Söderlund } 149524ed5d2cSNiklas Söderlund 14964ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 14974ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 14984ce63fcdSMarek Szyprowski { 149989cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 15004ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 15014136ce90SRobin Murphy struct page *page; 15024ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 15034ce63fcdSMarek Szyprowski 15044136ce90SRobin Murphy if (dev->dma_coherent || !iova) 15054ce63fcdSMarek Szyprowski return; 15064ce63fcdSMarek Szyprowski 15074136ce90SRobin Murphy page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 15084ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 15094ce63fcdSMarek Szyprowski } 15104ce63fcdSMarek Szyprowski 15114ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 15124ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 15134ce63fcdSMarek Szyprowski { 151489cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 15154ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 15164136ce90SRobin Murphy struct page *page; 15174ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 15184ce63fcdSMarek Szyprowski 15194136ce90SRobin Murphy if (dev->dma_coherent || !iova) 15204ce63fcdSMarek Szyprowski return; 15214ce63fcdSMarek Szyprowski 15224136ce90SRobin Murphy page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 15234ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 15244ce63fcdSMarek Szyprowski } 15254ce63fcdSMarek Szyprowski 152617fe8684SBen Dooks static const struct dma_map_ops iommu_ops = { 15274ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 15284ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 15294ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 1530dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 15314ce63fcdSMarek Szyprowski 15324ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 15334ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 15344ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 15354ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device, 15364ce63fcdSMarek Szyprowski 15374ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 15384ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 15394ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 15404ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 154124ed5d2cSNiklas Söderlund 154224ed5d2cSNiklas Söderlund .map_resource = arm_iommu_map_resource, 154324ed5d2cSNiklas Söderlund .unmap_resource = arm_iommu_unmap_resource, 15440fa478dfSRob Herring }; 15450fa478dfSRob 
Herring 15464ce63fcdSMarek Szyprowski /** 15474ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 15484ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 15494ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 155068efd7d2SMarek Szyprowski * @size: maximum size of the valid IO address space 15514ce63fcdSMarek Szyprowski * 15524ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 15534ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 15544ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 15554ce63fcdSMarek Szyprowski * 15564ce63fcdSMarek Szyprowski * The client device needs to be attached to the mapping with 15574ce63fcdSMarek Szyprowski * the arm_iommu_attach_device() function. 15584ce63fcdSMarek Szyprowski */ 15594ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 15601424532bSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) 15614ce63fcdSMarek Szyprowski { 156268efd7d2SMarek Szyprowski unsigned int bits = size >> PAGE_SHIFT; 156368efd7d2SMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 15644ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 156568efd7d2SMarek Szyprowski int extensions = 1; 15664ce63fcdSMarek Szyprowski int err = -ENOMEM; 15674ce63fcdSMarek Szyprowski 15681424532bSMarek Szyprowski /* currently only 32-bit DMA address space is supported */ 15691424532bSMarek Szyprowski if (size > DMA_BIT_MASK(32) + 1) 15701424532bSMarek Szyprowski return ERR_PTR(-ERANGE); 15711424532bSMarek Szyprowski 157268efd7d2SMarek Szyprowski if (!bitmap_size) 15734ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 15744ce63fcdSMarek Szyprowski 157568efd7d2SMarek Szyprowski if (bitmap_size > PAGE_SIZE) { 157668efd7d2SMarek Szyprowski extensions = bitmap_size / PAGE_SIZE; 157768efd7d2SMarek Szyprowski bitmap_size = PAGE_SIZE; 157868efd7d2SMarek Szyprowski } 157968efd7d2SMarek Szyprowski 15804ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 15814ce63fcdSMarek Szyprowski if (!mapping) 15824ce63fcdSMarek Szyprowski goto err; 15834ce63fcdSMarek Szyprowski 158468efd7d2SMarek Szyprowski mapping->bitmap_size = bitmap_size; 15856396bb22SKees Cook mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), 15864d852ef8SAndreas Herrmann GFP_KERNEL); 15874d852ef8SAndreas Herrmann if (!mapping->bitmaps) 15884ce63fcdSMarek Szyprowski goto err2; 15894ce63fcdSMarek Szyprowski 159068efd7d2SMarek Szyprowski mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); 15914d852ef8SAndreas Herrmann if (!mapping->bitmaps[0]) 15924d852ef8SAndreas Herrmann goto err3; 15934d852ef8SAndreas Herrmann 15944d852ef8SAndreas Herrmann mapping->nr_bitmaps = 1; 15954d852ef8SAndreas Herrmann mapping->extensions = extensions; 15964ce63fcdSMarek Szyprowski mapping->base = base; 159768efd7d2SMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 15984d852ef8SAndreas Herrmann 15994ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 16004ce63fcdSMarek Szyprowski 16014ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 16024ce63fcdSMarek Szyprowski if (!mapping->domain) 16034d852ef8SAndreas Herrmann goto err4; 16044ce63fcdSMarek Szyprowski 16054ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 16064ce63fcdSMarek Szyprowski return mapping; 16074d852ef8SAndreas Herrmann err4: 16084d852ef8SAndreas Herrmann
kfree(mapping->bitmaps[0]); 16094ce63fcdSMarek Szyprowski err3: 16104d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 16114ce63fcdSMarek Szyprowski err2: 16124ce63fcdSMarek Szyprowski kfree(mapping); 16134ce63fcdSMarek Szyprowski err: 16144ce63fcdSMarek Szyprowski return ERR_PTR(err); 16154ce63fcdSMarek Szyprowski } 161618177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 16174ce63fcdSMarek Szyprowski 16184ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 16194ce63fcdSMarek Szyprowski { 16204d852ef8SAndreas Herrmann int i; 16214ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 16224ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 16234ce63fcdSMarek Szyprowski 16244ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 16254d852ef8SAndreas Herrmann for (i = 0; i < mapping->nr_bitmaps; i++) 16264d852ef8SAndreas Herrmann kfree(mapping->bitmaps[i]); 16274d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 16284ce63fcdSMarek Szyprowski kfree(mapping); 16294ce63fcdSMarek Szyprowski } 16304ce63fcdSMarek Szyprowski 16314d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping) 16324d852ef8SAndreas Herrmann { 16334d852ef8SAndreas Herrmann int next_bitmap; 16344d852ef8SAndreas Herrmann 1635462859aaSMarek Szyprowski if (mapping->nr_bitmaps >= mapping->extensions) 16364d852ef8SAndreas Herrmann return -EINVAL; 16374d852ef8SAndreas Herrmann 16384d852ef8SAndreas Herrmann next_bitmap = mapping->nr_bitmaps; 16394d852ef8SAndreas Herrmann mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, 16404d852ef8SAndreas Herrmann GFP_ATOMIC); 16414d852ef8SAndreas Herrmann if (!mapping->bitmaps[next_bitmap]) 16424d852ef8SAndreas Herrmann return -ENOMEM; 16434d852ef8SAndreas Herrmann 16444d852ef8SAndreas Herrmann mapping->nr_bitmaps++; 16454d852ef8SAndreas Herrmann 16464d852ef8SAndreas Herrmann return 0; 16474d852ef8SAndreas Herrmann } 16484d852ef8SAndreas Herrmann 16494ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 16504ce63fcdSMarek Szyprowski { 16514ce63fcdSMarek Szyprowski if (mapping) 16524ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping); 16534ce63fcdSMarek Szyprowski } 165418177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 16554ce63fcdSMarek Szyprowski 1656eab8d653SLaurent Pinchart static int __arm_iommu_attach_device(struct device *dev, 16574ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping) 16584ce63fcdSMarek Szyprowski { 16594ce63fcdSMarek Szyprowski int err; 16604ce63fcdSMarek Szyprowski 16614ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev); 16624ce63fcdSMarek Szyprowski if (err) 16634ce63fcdSMarek Szyprowski return err; 16644ce63fcdSMarek Szyprowski 16654ce63fcdSMarek Szyprowski kref_get(&mapping->kref); 166689cfdb19SWill Deacon to_dma_iommu_mapping(dev) = mapping; 16674ce63fcdSMarek Szyprowski 166875c59716SHiroshi Doyu pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 16694ce63fcdSMarek Szyprowski return 0; 16704ce63fcdSMarek Szyprowski } 16714ce63fcdSMarek Szyprowski 16726fe36758SHiroshi Doyu /** 1673eab8d653SLaurent Pinchart * arm_iommu_attach_device 16746fe36758SHiroshi Doyu * @dev: valid struct device pointer 1675eab8d653SLaurent Pinchart * @mapping: io address space mapping structure (returned from 1676eab8d653SLaurent Pinchart * arm_iommu_create_mapping) 16776fe36758SHiroshi Doyu * 1678eab8d653SLaurent Pinchart * Attaches specified io address 
space mapping to the provided device. 1679eab8d653SLaurent Pinchart * This replaces the dma operations (dma_map_ops pointer) with the 1680eab8d653SLaurent Pinchart * IOMMU aware version. 1681eab8d653SLaurent Pinchart * 1682eab8d653SLaurent Pinchart * More than one client might be attached to the same io address space 1683eab8d653SLaurent Pinchart * mapping. 16846fe36758SHiroshi Doyu */ 1685eab8d653SLaurent Pinchart int arm_iommu_attach_device(struct device *dev, 1686eab8d653SLaurent Pinchart struct dma_iommu_mapping *mapping) 1687eab8d653SLaurent Pinchart { 1688eab8d653SLaurent Pinchart int err; 1689eab8d653SLaurent Pinchart 1690eab8d653SLaurent Pinchart err = __arm_iommu_attach_device(dev, mapping); 1691eab8d653SLaurent Pinchart if (err) 1692eab8d653SLaurent Pinchart return err; 1693eab8d653SLaurent Pinchart 1694eab8d653SLaurent Pinchart set_dma_ops(dev, &iommu_ops); 1695eab8d653SLaurent Pinchart return 0; 1696eab8d653SLaurent Pinchart } 1697eab8d653SLaurent Pinchart EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 1698eab8d653SLaurent Pinchart 1699d3e01c51SSricharan R /** 1700d3e01c51SSricharan R * arm_iommu_detach_device 1701d3e01c51SSricharan R * @dev: valid struct device pointer 1702d3e01c51SSricharan R * 1703d3e01c51SSricharan R * Detaches the provided device from a previously attached map. 17044a4d68fcSWolfram Sang (Renesas) * This overwrites the dma_ops pointer with appropriate non-IOMMU ops. 1705d3e01c51SSricharan R */ 1706d3e01c51SSricharan R void arm_iommu_detach_device(struct device *dev) 17076fe36758SHiroshi Doyu { 17086fe36758SHiroshi Doyu struct dma_iommu_mapping *mapping; 17096fe36758SHiroshi Doyu 17106fe36758SHiroshi Doyu mapping = to_dma_iommu_mapping(dev); 17116fe36758SHiroshi Doyu if (!mapping) { 17126fe36758SHiroshi Doyu dev_warn(dev, "Not attached\n"); 17136fe36758SHiroshi Doyu return; 17146fe36758SHiroshi Doyu } 17156fe36758SHiroshi Doyu 17166fe36758SHiroshi Doyu iommu_detach_device(mapping->domain, dev); 17176fe36758SHiroshi Doyu kref_put(&mapping->kref, release_iommu_mapping); 171889cfdb19SWill Deacon to_dma_iommu_mapping(dev) = NULL; 1719ae626eb9SChristoph Hellwig set_dma_ops(dev, NULL); 17206fe36758SHiroshi Doyu 17216fe36758SHiroshi Doyu pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 17226fe36758SHiroshi Doyu } 172318177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 17246fe36758SHiroshi Doyu 1725ae626eb9SChristoph Hellwig static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, 1726ae626eb9SChristoph Hellwig const struct iommu_ops *iommu, bool coherent) 17274bb25789SWill Deacon { 17284bb25789SWill Deacon struct dma_iommu_mapping *mapping; 17294bb25789SWill Deacon 17304bb25789SWill Deacon mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); 17314bb25789SWill Deacon if (IS_ERR(mapping)) { 17324bb25789SWill Deacon pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", 17334bb25789SWill Deacon size, dev_name(dev)); 1734ae626eb9SChristoph Hellwig return; 17354bb25789SWill Deacon } 17364bb25789SWill Deacon 1737eab8d653SLaurent Pinchart if (__arm_iommu_attach_device(dev, mapping)) { 17384bb25789SWill Deacon pr_warn("Failed to attach device %s to IOMMU mapping\n", 17394bb25789SWill Deacon dev_name(dev)); 17404bb25789SWill Deacon arm_iommu_release_mapping(mapping); 1741ae626eb9SChristoph Hellwig return; 17424bb25789SWill Deacon } 17434bb25789SWill Deacon 1744ae626eb9SChristoph Hellwig set_dma_ops(dev, &iommu_ops); 17454bb25789SWill Deacon } 17464bb25789SWill Deacon 17474bb25789SWill Deacon
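/*
 * A minimal usage sketch of the exported mapping API above, assuming a
 * platform device behind an IOMMU whose bus code does not set up the
 * mapping automatically (arm_setup_iommu_dma_ops() above does the same
 * job for the common case). The bus pointer, IOVA base and 1 GiB window
 * size are illustrative assumptions only:
 *
 *	struct dma_iommu_mapping *mapping;
 *	int err;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_1G);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err) {
 *		arm_iommu_release_mapping(mapping);
 *		return err;
 *	}
 *
 *	(the regular DMA API now routes through iommu_ops for this device)
 *
 *	arm_iommu_detach_device(dev);
 *	arm_iommu_release_mapping(mapping);
 */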
static void arm_teardown_iommu_dma_ops(struct device *dev) 17484bb25789SWill Deacon { 174989cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 17504bb25789SWill Deacon 1751c2273a18SWill Deacon if (!mapping) 1752c2273a18SWill Deacon return; 1753c2273a18SWill Deacon 1754d3e01c51SSricharan R arm_iommu_detach_device(dev); 17554bb25789SWill Deacon arm_iommu_release_mapping(mapping); 17564bb25789SWill Deacon } 17574bb25789SWill Deacon 17584bb25789SWill Deacon #else 17594bb25789SWill Deacon 1760ae626eb9SChristoph Hellwig static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, 1761ae626eb9SChristoph Hellwig const struct iommu_ops *iommu, bool coherent) 17624bb25789SWill Deacon { 17634bb25789SWill Deacon } 17644bb25789SWill Deacon 17654bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { } 17664bb25789SWill Deacon 17674bb25789SWill Deacon #endif /* CONFIG_ARM_DMA_USE_IOMMU */ 17684bb25789SWill Deacon 17694bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 177053c92d79SRobin Murphy const struct iommu_ops *iommu, bool coherent) 17714bb25789SWill Deacon { 1772*49bc8bebSChristoph Hellwig /* 1773*49bc8bebSChristoph Hellwig * Due to legacy code that sets the ->dma_coherent flag from a bus 1774*49bc8bebSChristoph Hellwig * notifier we can't just assign coherent to the ->dma_coherent flag 1775*49bc8bebSChristoph Hellwig * here, but instead have to make sure we only set but never clear it 1776*49bc8bebSChristoph Hellwig * for now. 1777*49bc8bebSChristoph Hellwig */ 1778*49bc8bebSChristoph Hellwig if (coherent) { 1779*49bc8bebSChristoph Hellwig dev->archdata.dma_coherent = true; 1780*49bc8bebSChristoph Hellwig dev->dma_coherent = true; 1781*49bc8bebSChristoph Hellwig } 178226b37b94SLaurent Pinchart 178326b37b94SLaurent Pinchart /* 178426b37b94SLaurent Pinchart * Don't override the dma_ops if they have already been set. Ideally 178526b37b94SLaurent Pinchart * this should be the only location where dma_ops are set, remove this 178626b37b94SLaurent Pinchart * check when all other callers of set_dma_ops will have disappeared. 
178726b37b94SLaurent Pinchart */ 178826b37b94SLaurent Pinchart if (dev->dma_ops) 178926b37b94SLaurent Pinchart return; 179026b37b94SLaurent Pinchart 1791ae626eb9SChristoph Hellwig if (iommu) 1792ae626eb9SChristoph Hellwig arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent); 1793e0586326SStefano Stabellini 17949bf22421SOleksandr Tyshchenko xen_setup_dma_ops(dev); 1795a93a121aSLaurent Pinchart dev->archdata.dma_ops_setup = true; 17964bb25789SWill Deacon } 17974bb25789SWill Deacon 17984bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev) 17994bb25789SWill Deacon { 1800a93a121aSLaurent Pinchart if (!dev->archdata.dma_ops_setup) 1801a93a121aSLaurent Pinchart return; 1802a93a121aSLaurent Pinchart 18034bb25789SWill Deacon arm_teardown_iommu_dma_ops(dev); 1804fc67e6f1SRobin Murphy /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ 1805fc67e6f1SRobin Murphy set_dma_ops(dev, NULL); 18064bb25789SWill Deacon } 1807ad3c7b18SChristoph Hellwig 180856e35f9cSChristoph Hellwig void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, 180956e35f9cSChristoph Hellwig enum dma_data_direction dir) 1810ad3c7b18SChristoph Hellwig { 1811ad3c7b18SChristoph Hellwig __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), 1812ad3c7b18SChristoph Hellwig size, dir); 1813ad3c7b18SChristoph Hellwig } 1814ad3c7b18SChristoph Hellwig 181556e35f9cSChristoph Hellwig void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, 181656e35f9cSChristoph Hellwig enum dma_data_direction dir) 1817ad3c7b18SChristoph Hellwig { 1818ad3c7b18SChristoph Hellwig __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), 1819ad3c7b18SChristoph Hellwig size, dir); 1820ad3c7b18SChristoph Hellwig } 1821ad3c7b18SChristoph Hellwig 1822ad3c7b18SChristoph Hellwig void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 1823ad3c7b18SChristoph Hellwig gfp_t gfp, unsigned long attrs) 1824ad3c7b18SChristoph Hellwig { 1825ad3c7b18SChristoph Hellwig return __dma_alloc(dev, size, dma_handle, gfp, 1826ad3c7b18SChristoph Hellwig __get_dma_pgprot(attrs, PAGE_KERNEL), false, 1827ad3c7b18SChristoph Hellwig attrs, __builtin_return_address(0)); 1828ad3c7b18SChristoph Hellwig } 1829ad3c7b18SChristoph Hellwig 1830ad3c7b18SChristoph Hellwig void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, 1831ad3c7b18SChristoph Hellwig dma_addr_t dma_handle, unsigned long attrs) 1832ad3c7b18SChristoph Hellwig { 1833ad3c7b18SChristoph Hellwig __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false); 1834ad3c7b18SChristoph Hellwig } 1835
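/*
 * A hedged sketch of the driver-side calls that typically reach the arch
 * hooks above for a non-coherent device using the direct mapping:
 * dma_alloc_coherent()/dma_free_coherent() may end up in arch_dma_alloc()
 * and arch_dma_free(), while the streaming API triggers
 * arch_sync_dma_for_device()/arch_sync_dma_for_cpu(). 'dev', 'buf' and
 * 'len' are assumed to be provided by the caller:
 *
 *	dma_addr_t handle;
 *	void *cpu;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, handle);
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */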