/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
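/*
 * Example (illustrative sketch only, not used by the code below): the
 * ownership hand-off for a single streaming buffer, as seen from a
 * driver.  dma_map_page()/dma_unmap_page() resolve to the arm_dma_*
 * helpers defined in this file.
 */
static int __maybe_unused example_stream_to_device(struct device *dev,
						   struct page *page)
{
	dma_addr_t handle;

	/* CPU -> device: the cache is cleaned before the transfer */
	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... the device owns the buffer; start the transfer here ... */

	/* device -> CPU: ownership returns, invalidation happens now */
	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}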
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page's streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
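/*
 * Example (sketch): platform code for a cache-coherent bus could select
 * the coherent ops for a device before a driver binds to it.
 * set_dma_ops() is assumed to be available from <asm/dma-mapping.h>.
 */
static void __maybe_unused example_mark_device_coherent(struct device *dev)
{
	set_dma_ops(dev, &arm_coherent_dma_ops);
}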
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	void *ptr;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	if (ptr) {
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
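/*
 * Worked example for __dma_alloc_buffer(): a 5 page (20 KiB with 4 KiB
 * pages) request has get_order() == 3, so alloc_pages() returns 8 pages;
 * split_page() then allows pages 5..7 to be handed straight back, leaving
 * exactly the 5 pages that were asked for.
 */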
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set the VM_USERMAP flag too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};
static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool.vaddr);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool.size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;

	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
					   &page, NULL);
	if (ptr) {
		int i;

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
		       (unsigned)pool->size / 1024);
		return 0;
	}

	kfree(pages);
no_pages:
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
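/*
 * Example (sketch): resizing the atomic pool.  Either pass
 * "coherent_pool=4M" on the kernel command line, or have platform code
 * call init_dma_coherent_pool_size() before atomic_pool_init() runs -
 * e.g. from a machine's ->reserve() callback, as sketched below (the
 * function name here is hypothetical).
 */
static void __init __maybe_unused example_machine_reserve(void)
{
	init_dma_coherent_pool_size(SZ_1M);
}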
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size.  This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;
	__dma_clear_buffer(page, size);
	__dma_remap(page, size, prot);

	*ret_page = page;
	return page_address(page);
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   size_t size)
{
	__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)	__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, size)			do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif
	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}
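/*
 * Example (sketch): driver-side use of the allocator above.
 * dma_alloc_coherent() reaches arm_dma_alloc() through arm_dma_ops.
 */
static int __maybe_unused example_alloc_descriptor_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, access "ring" from the CPU ... */

	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
	return 0;
}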
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
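/*
 * Example (sketch): a driver's .mmap handler exporting a coherent buffer
 * to user space; dma_mmap_coherent() (assumed available via the generic
 * dma-mapping helpers) ends up in arm_dma_mmap() above.  "buf" and
 * "buf_dma" are assumed to come from an earlier dma_alloc_coherent().
 */
static int __maybe_unused example_mmap_coherent(struct device *dev,
						struct vm_area_struct *vma,
						void *buf, dma_addr_t buf_dma,
						size_t size)
{
	return dma_mmap_coherent(dev, vma, buf, buf_dma, size);
}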
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
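/*
 * Example (sketch): typical driver use of the scatter-gather helpers
 * above, through the generic dma_map_sg()/dma_unmap_sg() wrappers.
 */
static int __maybe_unused example_map_sg(struct device *dev,
					 struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int count, i;

	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (count == 0)
		return -ENOMEM;

	for_each_sg(sgl, s, count, i) {
		/* ... program sg_dma_address(s) and sg_dma_len(s) ... */
	}

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}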
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}
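/*
 * Worked example for __alloc_iova(): with mapping->order == 1 (2 page
 * granules), a 5 page request needs count == (5 + 1) >> 1 == 3 granules;
 * get_order() of 5 pages is 3, so align == (1 << (3 - 1)) - 1 == 3 and
 * the bitmap search returns a slot aligned to 4 granules (32 KiB with
 * 4 KiB pages).
 */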
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
Szyprowski int count = size >> PAGE_SHIFT; 11084ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 11094ce63fcdSMarek Szyprowski int i; 1110549a17e4SMarek Szyprowski 1111549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { 1112549a17e4SMarek Szyprowski dma_release_from_contiguous(dev, pages[0], count); 1113549a17e4SMarek Szyprowski } else { 11144ce63fcdSMarek Szyprowski for (i = 0; i < count; i++) 11154ce63fcdSMarek Szyprowski if (pages[i]) 11164ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 1117549a17e4SMarek Szyprowski } 1118549a17e4SMarek Szyprowski 111946c87852SPrathyush K if (array_size <= PAGE_SIZE) 11204ce63fcdSMarek Szyprowski kfree(pages); 11214ce63fcdSMarek Szyprowski else 11224ce63fcdSMarek Szyprowski vfree(pages); 11234ce63fcdSMarek Szyprowski return 0; 11244ce63fcdSMarek Szyprowski } 11254ce63fcdSMarek Szyprowski 11264ce63fcdSMarek Szyprowski /* 11274ce63fcdSMarek Szyprowski * Create a CPU mapping for the specified pages 11284ce63fcdSMarek Szyprowski */ 11294ce63fcdSMarek Szyprowski static void * 1130e9da6e99SMarek Szyprowski __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, 1131e9da6e99SMarek Szyprowski const void *caller) 11324ce63fcdSMarek Szyprowski { 1133e9da6e99SMarek Szyprowski unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 1134e9da6e99SMarek Szyprowski struct vm_struct *area; 1135e9da6e99SMarek Szyprowski unsigned long p; 11364ce63fcdSMarek Szyprowski 1137e9da6e99SMarek Szyprowski area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, 1138e9da6e99SMarek Szyprowski caller); 1139e9da6e99SMarek Szyprowski if (!area) 11404ce63fcdSMarek Szyprowski return NULL; 1141e9da6e99SMarek Szyprowski 1142e9da6e99SMarek Szyprowski area->pages = pages; 1143e9da6e99SMarek Szyprowski area->nr_pages = nr_pages; 1144e9da6e99SMarek Szyprowski p = (unsigned long)area->addr; 1145e9da6e99SMarek Szyprowski 1146e9da6e99SMarek Szyprowski for (i = 0; i < nr_pages; i++) { 1147e9da6e99SMarek Szyprowski phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); 1148e9da6e99SMarek Szyprowski if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) 1149e9da6e99SMarek Szyprowski goto err; 1150e9da6e99SMarek Szyprowski p += PAGE_SIZE; 11514ce63fcdSMarek Szyprowski } 1152e9da6e99SMarek Szyprowski return area->addr; 1153e9da6e99SMarek Szyprowski err: 1154e9da6e99SMarek Szyprowski unmap_kernel_range((unsigned long)area->addr, size); 1155e9da6e99SMarek Szyprowski vunmap(area->addr); 11564ce63fcdSMarek Szyprowski return NULL; 11574ce63fcdSMarek Szyprowski } 11584ce63fcdSMarek Szyprowski 11594ce63fcdSMarek Szyprowski /* 11604ce63fcdSMarek Szyprowski * Create a mapping in device IO address space for the specified pages 11614ce63fcdSMarek Szyprowski */ 11624ce63fcdSMarek Szyprowski static dma_addr_t 11634ce63fcdSMarek Szyprowski __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) 11644ce63fcdSMarek Szyprowski { 11654ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 11664ce63fcdSMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 11674ce63fcdSMarek Szyprowski dma_addr_t dma_addr, iova; 11684ce63fcdSMarek Szyprowski int i, ret = DMA_ERROR_CODE; 11694ce63fcdSMarek Szyprowski 11704ce63fcdSMarek Szyprowski dma_addr = __alloc_iova(mapping, size); 11714ce63fcdSMarek Szyprowski if (dma_addr == DMA_ERROR_CODE) 11724ce63fcdSMarek Szyprowski return dma_addr; 11734ce63fcdSMarek Szyprowski 11744ce63fcdSMarek Szyprowski iova = dma_addr; 11754ce63fcdSMarek
Szyprowski for (i = 0; i < count; ) { 11764ce63fcdSMarek Szyprowski unsigned int next_pfn = page_to_pfn(pages[i]) + 1; 11774ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(pages[i]); 11784ce63fcdSMarek Szyprowski unsigned int len, j; 11794ce63fcdSMarek Szyprowski 11804ce63fcdSMarek Szyprowski for (j = i + 1; j < count; j++, next_pfn++) 11814ce63fcdSMarek Szyprowski if (page_to_pfn(pages[j]) != next_pfn) 11824ce63fcdSMarek Szyprowski break; 11834ce63fcdSMarek Szyprowski 11844ce63fcdSMarek Szyprowski len = (j - i) << PAGE_SHIFT; 11854ce63fcdSMarek Szyprowski ret = iommu_map(mapping->domain, iova, phys, len, 0); 11864ce63fcdSMarek Szyprowski if (ret < 0) 11874ce63fcdSMarek Szyprowski goto fail; 11884ce63fcdSMarek Szyprowski iova += len; 11894ce63fcdSMarek Szyprowski i = j; 11904ce63fcdSMarek Szyprowski } 11914ce63fcdSMarek Szyprowski return dma_addr; 11924ce63fcdSMarek Szyprowski fail: 11934ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); 11944ce63fcdSMarek Szyprowski __free_iova(mapping, dma_addr, size); 11954ce63fcdSMarek Szyprowski return DMA_ERROR_CODE; 11964ce63fcdSMarek Szyprowski } 11974ce63fcdSMarek Szyprowski 11984ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 11994ce63fcdSMarek Szyprowski { 12004ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 12014ce63fcdSMarek Szyprowski 12024ce63fcdSMarek Szyprowski /* 12034ce63fcdSMarek Szyprowski * add optional in-page offset from iova to size and align 12044ce63fcdSMarek Szyprowski * result to page size 12054ce63fcdSMarek Szyprowski */ 12064ce63fcdSMarek Szyprowski size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 12074ce63fcdSMarek Szyprowski iova &= PAGE_MASK; 12084ce63fcdSMarek Szyprowski 12094ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, size); 12104ce63fcdSMarek Szyprowski __free_iova(mapping, iova, size); 12114ce63fcdSMarek Szyprowski return 0; 12124ce63fcdSMarek Szyprowski } 12134ce63fcdSMarek Szyprowski 1214665bad7bSHiroshi Doyu static struct page **__atomic_get_pages(void *addr) 1215665bad7bSHiroshi Doyu { 1216665bad7bSHiroshi Doyu struct dma_pool *pool = &atomic_pool; 1217665bad7bSHiroshi Doyu struct page **pages = pool->pages; 1218665bad7bSHiroshi Doyu int offs = (addr - pool->vaddr) >> PAGE_SHIFT; 1219665bad7bSHiroshi Doyu 1220665bad7bSHiroshi Doyu return pages + offs; 1221665bad7bSHiroshi Doyu } 1222665bad7bSHiroshi Doyu 1223955c757eSMarek Szyprowski static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) 1224e9da6e99SMarek Szyprowski { 1225e9da6e99SMarek Szyprowski struct vm_struct *area; 1226e9da6e99SMarek Szyprowski 1227665bad7bSHiroshi Doyu if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1228665bad7bSHiroshi Doyu return __atomic_get_pages(cpu_addr); 1229665bad7bSHiroshi Doyu 1230955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1231955c757eSMarek Szyprowski return cpu_addr; 1232955c757eSMarek Szyprowski 1233e9da6e99SMarek Szyprowski area = find_vm_area(cpu_addr); 1234e9da6e99SMarek Szyprowski if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) 1235e9da6e99SMarek Szyprowski return area->pages; 1236e9da6e99SMarek Szyprowski return NULL; 1237e9da6e99SMarek Szyprowski } 1238e9da6e99SMarek Szyprowski 1239479ed93aSHiroshi Doyu static void *__iommu_alloc_atomic(struct device *dev, size_t size, 1240479ed93aSHiroshi Doyu dma_addr_t *handle) 1241479ed93aSHiroshi Doyu { 1242479ed93aSHiroshi Doyu struct page *page; 1243479ed93aSHiroshi Doyu void 
*addr; 1244479ed93aSHiroshi Doyu 1245479ed93aSHiroshi Doyu addr = __alloc_from_pool(size, &page); 1246479ed93aSHiroshi Doyu if (!addr) 1247479ed93aSHiroshi Doyu return NULL; 1248479ed93aSHiroshi Doyu 1249479ed93aSHiroshi Doyu *handle = __iommu_create_mapping(dev, &page, size); 1250479ed93aSHiroshi Doyu if (*handle == DMA_ERROR_CODE) 1251479ed93aSHiroshi Doyu goto err_mapping; 1252479ed93aSHiroshi Doyu 1253479ed93aSHiroshi Doyu return addr; 1254479ed93aSHiroshi Doyu 1255479ed93aSHiroshi Doyu err_mapping: 1256479ed93aSHiroshi Doyu __free_from_pool(addr, size); 1257479ed93aSHiroshi Doyu return NULL; 1258479ed93aSHiroshi Doyu } 1259479ed93aSHiroshi Doyu 1260479ed93aSHiroshi Doyu static void __iommu_free_atomic(struct device *dev, struct page **pages, 1261479ed93aSHiroshi Doyu dma_addr_t handle, size_t size) 1262479ed93aSHiroshi Doyu { 1263479ed93aSHiroshi Doyu __iommu_remove_mapping(dev, handle, size); 1264479ed93aSHiroshi Doyu __free_from_pool(page_address(pages[0]), size); 1265479ed93aSHiroshi Doyu } 1266479ed93aSHiroshi Doyu 12674ce63fcdSMarek Szyprowski static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 12684ce63fcdSMarek Szyprowski dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 12694ce63fcdSMarek Szyprowski { 12704ce63fcdSMarek Szyprowski pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); 12714ce63fcdSMarek Szyprowski struct page **pages; 12724ce63fcdSMarek Szyprowski void *addr = NULL; 12734ce63fcdSMarek Szyprowski 12744ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 12754ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 12764ce63fcdSMarek Szyprowski 1277479ed93aSHiroshi Doyu if (gfp & GFP_ATOMIC) 1278479ed93aSHiroshi Doyu return __iommu_alloc_atomic(dev, size, handle); 1279479ed93aSHiroshi Doyu 1280549a17e4SMarek Szyprowski pages = __iommu_alloc_buffer(dev, size, gfp, attrs); 12814ce63fcdSMarek Szyprowski if (!pages) 12824ce63fcdSMarek Szyprowski return NULL; 12834ce63fcdSMarek Szyprowski 12844ce63fcdSMarek Szyprowski *handle = __iommu_create_mapping(dev, pages, size); 12854ce63fcdSMarek Szyprowski if (*handle == DMA_ERROR_CODE) 12864ce63fcdSMarek Szyprowski goto err_buffer; 12874ce63fcdSMarek Szyprowski 1288955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1289955c757eSMarek Szyprowski return pages; 1290955c757eSMarek Szyprowski 1291e9da6e99SMarek Szyprowski addr = __iommu_alloc_remap(pages, size, gfp, prot, 1292e9da6e99SMarek Szyprowski __builtin_return_address(0)); 12934ce63fcdSMarek Szyprowski if (!addr) 12944ce63fcdSMarek Szyprowski goto err_mapping; 12954ce63fcdSMarek Szyprowski 12964ce63fcdSMarek Szyprowski return addr; 12974ce63fcdSMarek Szyprowski 12984ce63fcdSMarek Szyprowski err_mapping: 12994ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, *handle, size); 13004ce63fcdSMarek Szyprowski err_buffer: 1301549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 13024ce63fcdSMarek Szyprowski return NULL; 13034ce63fcdSMarek Szyprowski } 13044ce63fcdSMarek Szyprowski 13054ce63fcdSMarek Szyprowski static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 13064ce63fcdSMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 13074ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 13084ce63fcdSMarek Szyprowski { 13094ce63fcdSMarek Szyprowski unsigned long uaddr = vma->vm_start; 13104ce63fcdSMarek Szyprowski unsigned long usize = vma->vm_end - vma->vm_start; 1311955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1312e9da6e99SMarek Szyprowski 
1313e9da6e99SMarek Szyprowski vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 1314e9da6e99SMarek Szyprowski 1315e9da6e99SMarek Szyprowski if (!pages) 1316e9da6e99SMarek Szyprowski return -ENXIO; 13174ce63fcdSMarek Szyprowski 13184ce63fcdSMarek Szyprowski do { 1319e9da6e99SMarek Szyprowski int ret = vm_insert_page(vma, uaddr, *pages++); 13204ce63fcdSMarek Szyprowski if (ret) { 1321e9da6e99SMarek Szyprowski pr_err("Remapping memory failed: %d\n", ret); 13224ce63fcdSMarek Szyprowski return ret; 13234ce63fcdSMarek Szyprowski } 13244ce63fcdSMarek Szyprowski uaddr += PAGE_SIZE; 13254ce63fcdSMarek Szyprowski usize -= PAGE_SIZE; 13264ce63fcdSMarek Szyprowski } while (usize > 0); 1327e9da6e99SMarek Szyprowski 13284ce63fcdSMarek Szyprowski return 0; 13294ce63fcdSMarek Szyprowski } 13304ce63fcdSMarek Szyprowski 13314ce63fcdSMarek Szyprowski /* 13324ce63fcdSMarek Szyprowski * free a page as defined by the above mapping. 13334ce63fcdSMarek Szyprowski * Must not be called with IRQs disabled. 13344ce63fcdSMarek Szyprowski */ 13354ce63fcdSMarek Szyprowski void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 13364ce63fcdSMarek Szyprowski dma_addr_t handle, struct dma_attrs *attrs) 13374ce63fcdSMarek Szyprowski { 1338955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 13394ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 13404ce63fcdSMarek Szyprowski 1341e9da6e99SMarek Szyprowski if (!pages) { 1342e9da6e99SMarek Szyprowski WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1343e9da6e99SMarek Szyprowski return; 1344e9da6e99SMarek Szyprowski } 1345e9da6e99SMarek Szyprowski 1346479ed93aSHiroshi Doyu if (__in_atomic_pool(cpu_addr, size)) { 1347479ed93aSHiroshi Doyu __iommu_free_atomic(dev, pages, handle, size); 1348479ed93aSHiroshi Doyu return; 1349479ed93aSHiroshi Doyu } 1350479ed93aSHiroshi Doyu 1351955c757eSMarek Szyprowski if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { 1352e9da6e99SMarek Szyprowski unmap_kernel_range((unsigned long)cpu_addr, size); 1353e9da6e99SMarek Szyprowski vunmap(cpu_addr); 1354955c757eSMarek Szyprowski } 1355e9da6e99SMarek Szyprowski 13564ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size); 1357549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 13584ce63fcdSMarek Szyprowski } 13594ce63fcdSMarek Szyprowski 1360dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 1361dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, 1362dc2832e1SMarek Szyprowski size_t size, struct dma_attrs *attrs) 1363dc2832e1SMarek Szyprowski { 1364dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1365dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1366dc2832e1SMarek Szyprowski 1367dc2832e1SMarek Szyprowski if (!pages) 1368dc2832e1SMarek Szyprowski return -ENXIO; 1369dc2832e1SMarek Szyprowski 1370dc2832e1SMarek Szyprowski return sg_alloc_table_from_pages(sgt, pages, count, 0, size, 1371dc2832e1SMarek Szyprowski GFP_KERNEL); 13724ce63fcdSMarek Szyprowski } 13734ce63fcdSMarek Szyprowski 13744ce63fcdSMarek Szyprowski /* 13754ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 13764ce63fcdSMarek Szyprowski */ 13774ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 13784ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 13790fa478dfSRob Herring enum dma_data_direction dir, 
struct dma_attrs *attrs, 13800fa478dfSRob Herring bool is_coherent) 13814ce63fcdSMarek Szyprowski { 13824ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 13834ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 13844ce63fcdSMarek Szyprowski int ret = 0; 13854ce63fcdSMarek Szyprowski unsigned int count; 13864ce63fcdSMarek Szyprowski struct scatterlist *s; 13874ce63fcdSMarek Szyprowski 13884ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 13894ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 13904ce63fcdSMarek Szyprowski 13914ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 13924ce63fcdSMarek Szyprowski if (iova == DMA_ERROR_CODE) 13934ce63fcdSMarek Szyprowski return -ENOMEM; 13944ce63fcdSMarek Szyprowski 13954ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 13964ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(sg_page(s)); 13974ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 13984ce63fcdSMarek Szyprowski 13990fa478dfSRob Herring if (!is_coherent && 140097ef952aSMarek Szyprowski !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 14014ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 14024ce63fcdSMarek Szyprowski 14034ce63fcdSMarek Szyprowski ret = iommu_map(mapping->domain, iova, phys, len, 0); 14044ce63fcdSMarek Szyprowski if (ret < 0) 14054ce63fcdSMarek Szyprowski goto fail; 14064ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 14074ce63fcdSMarek Szyprowski iova += len; 14084ce63fcdSMarek Szyprowski } 14094ce63fcdSMarek Szyprowski *handle = iova_base; 14104ce63fcdSMarek Szyprowski 14114ce63fcdSMarek Szyprowski return 0; 14124ce63fcdSMarek Szyprowski fail: 14134ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 14144ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 14154ce63fcdSMarek Szyprowski return ret; 14164ce63fcdSMarek Szyprowski } 14174ce63fcdSMarek Szyprowski 14180fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 14190fa478dfSRob Herring enum dma_data_direction dir, struct dma_attrs *attrs, 14200fa478dfSRob Herring bool is_coherent) 14214ce63fcdSMarek Szyprowski { 14224ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 14234ce63fcdSMarek Szyprowski int i, count = 0; 14244ce63fcdSMarek Szyprowski unsigned int offset = s->offset; 14254ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 14264ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 14274ce63fcdSMarek Szyprowski 14284ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 14294ce63fcdSMarek Szyprowski s = sg_next(s); 14304ce63fcdSMarek Szyprowski 14314ce63fcdSMarek Szyprowski s->dma_address = DMA_ERROR_CODE; 14324ce63fcdSMarek Szyprowski s->dma_length = 0; 14334ce63fcdSMarek Szyprowski 14344ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 14354ce63fcdSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, 14360fa478dfSRob Herring dir, attrs, is_coherent) < 0) 14374ce63fcdSMarek Szyprowski goto bad_mapping; 14384ce63fcdSMarek Szyprowski 14394ce63fcdSMarek Szyprowski dma->dma_address += offset; 14404ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 14414ce63fcdSMarek Szyprowski 14424ce63fcdSMarek Szyprowski size = offset = s->offset; 14434ce63fcdSMarek Szyprowski start = s; 14444ce63fcdSMarek Szyprowski dma = sg_next(dma); 
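/* descriptive note (added): the completed chunk has been handed to __map_sg_chunk(); 'dma' now points at the scatterlist entry that will describe the next chunk, which begins at 's' */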
14454ce63fcdSMarek Szyprowski count += 1; 14464ce63fcdSMarek Szyprowski } 14474ce63fcdSMarek Szyprowski size += s->length; 14484ce63fcdSMarek Szyprowski } 14490fa478dfSRob Herring if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 14500fa478dfSRob Herring is_coherent) < 0) 14514ce63fcdSMarek Szyprowski goto bad_mapping; 14524ce63fcdSMarek Szyprowski 14534ce63fcdSMarek Szyprowski dma->dma_address += offset; 14544ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 14554ce63fcdSMarek Szyprowski 14564ce63fcdSMarek Szyprowski return count+1; 14574ce63fcdSMarek Szyprowski 14584ce63fcdSMarek Szyprowski bad_mapping: 14594ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 14604ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 14614ce63fcdSMarek Szyprowski return 0; 14624ce63fcdSMarek Szyprowski } 14634ce63fcdSMarek Szyprowski 14644ce63fcdSMarek Szyprowski /** 14650fa478dfSRob Herring * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA 14660fa478dfSRob Herring * @dev: valid struct device pointer 14670fa478dfSRob Herring * @sg: list of buffers 14680fa478dfSRob Herring * @nents: number of buffers to map 14690fa478dfSRob Herring * @dir: DMA transfer direction 14700fa478dfSRob Herring * 14710fa478dfSRob Herring * Map a set of i/o coherent buffers described by scatterlist in streaming 14720fa478dfSRob Herring * mode for DMA. The scatter gather list elements are merged together (if 14730fa478dfSRob Herring * possible) and tagged with the appropriate dma address and length. They are 14740fa478dfSRob Herring * obtained via sg_dma_{address,length}. 14750fa478dfSRob Herring */ 14760fa478dfSRob Herring int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 14770fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 14780fa478dfSRob Herring { 14790fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 14800fa478dfSRob Herring } 14810fa478dfSRob Herring 14820fa478dfSRob Herring /** 14830fa478dfSRob Herring * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 14840fa478dfSRob Herring * @dev: valid struct device pointer 14850fa478dfSRob Herring * @sg: list of buffers 14860fa478dfSRob Herring * @nents: number of buffers to map 14870fa478dfSRob Herring * @dir: DMA transfer direction 14880fa478dfSRob Herring * 14890fa478dfSRob Herring * Map a set of buffers described by scatterlist in streaming mode for DMA. 14900fa478dfSRob Herring * The scatter gather list elements are merged together (if possible) and 14910fa478dfSRob Herring * tagged with the appropriate dma address and length. They are obtained via 14920fa478dfSRob Herring * sg_dma_{address,length}. 
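 *
 * Example (hypothetical driver code; drivers normally reach this
 * function through the generic dma_map_sg() wrapper rather than
 * calling it directly):
 *
 *	struct scatterlist *s;
 *	int i, n;
 *
 *	n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (n == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, n, i)
 *		hw_queue_seg(sg_dma_address(s), sg_dma_len(s));
 *
 * where hw_queue_seg() is a made-up stand-in for device-specific
 * descriptor programming.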
14930fa478dfSRob Herring */ 14940fa478dfSRob Herring int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 14950fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 14960fa478dfSRob Herring { 14970fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 14980fa478dfSRob Herring } 14990fa478dfSRob Herring 15000fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 15010fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs, 15020fa478dfSRob Herring bool is_coherent) 15030fa478dfSRob Herring { 15040fa478dfSRob Herring struct scatterlist *s; 15050fa478dfSRob Herring int i; 15060fa478dfSRob Herring 15070fa478dfSRob Herring for_each_sg(sg, s, nents, i) { 15080fa478dfSRob Herring if (sg_dma_len(s)) 15090fa478dfSRob Herring __iommu_remove_mapping(dev, sg_dma_address(s), 15100fa478dfSRob Herring sg_dma_len(s)); 15110fa478dfSRob Herring if (!is_coherent && 15120fa478dfSRob Herring !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 15130fa478dfSRob Herring __dma_page_dev_to_cpu(sg_page(s), s->offset, 15140fa478dfSRob Herring s->length, dir); 15150fa478dfSRob Herring } 15160fa478dfSRob Herring } 15170fa478dfSRob Herring 15180fa478dfSRob Herring /** 15190fa478dfSRob Herring * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 15200fa478dfSRob Herring * @dev: valid struct device pointer 15210fa478dfSRob Herring * @sg: list of buffers 15220fa478dfSRob Herring * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 15230fa478dfSRob Herring * @dir: DMA transfer direction (same as was passed to dma_map_sg) 15240fa478dfSRob Herring * 15250fa478dfSRob Herring * Unmap a set of streaming mode DMA translations. Again, CPU access 15260fa478dfSRob Herring * rules concerning calls here are the same as for dma_unmap_single(). 15270fa478dfSRob Herring */ 15280fa478dfSRob Herring void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 15290fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 15300fa478dfSRob Herring { 15310fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 15320fa478dfSRob Herring } 15330fa478dfSRob Herring 15340fa478dfSRob Herring /** 15354ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 15364ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 15374ce63fcdSMarek Szyprowski * @sg: list of buffers 15384ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 15394ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 15404ce63fcdSMarek Szyprowski * 15414ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 15424ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 
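 *
 * Example (hypothetical; pairs with the dma_map_sg() call shown in the
 * arm_iommu_map_sg() comment above and passes the original nents, not
 * the value dma_map_sg() returned):
 *
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);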
15434ce63fcdSMarek Szyprowski */ 15444ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 15454ce63fcdSMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 15464ce63fcdSMarek Szyprowski { 15470fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 15484ce63fcdSMarek Szyprowski } 15494ce63fcdSMarek Szyprowski 15504ce63fcdSMarek Szyprowski /** 15514ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 15524ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 15534ce63fcdSMarek Szyprowski * @sg: list of buffers 15544ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 15554ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 15564ce63fcdSMarek Szyprowski */ 15574ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 15584ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 15594ce63fcdSMarek Szyprowski { 15604ce63fcdSMarek Szyprowski struct scatterlist *s; 15614ce63fcdSMarek Szyprowski int i; 15624ce63fcdSMarek Szyprowski 15634ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 15644ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 15654ce63fcdSMarek Szyprowski 15664ce63fcdSMarek Szyprowski } 15674ce63fcdSMarek Szyprowski 15684ce63fcdSMarek Szyprowski /** 15694ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 15704ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 15714ce63fcdSMarek Szyprowski * @sg: list of buffers 15724ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 15734ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 15744ce63fcdSMarek Szyprowski */ 15754ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 15764ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 15774ce63fcdSMarek Szyprowski { 15784ce63fcdSMarek Szyprowski struct scatterlist *s; 15794ce63fcdSMarek Szyprowski int i; 15804ce63fcdSMarek Szyprowski 15814ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 15824ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 15834ce63fcdSMarek Szyprowski } 15844ce63fcdSMarek Szyprowski 15854ce63fcdSMarek Szyprowski 15864ce63fcdSMarek Szyprowski /** 15870fa478dfSRob Herring * arm_coherent_iommu_map_page 15880fa478dfSRob Herring * @dev: valid struct device pointer 15890fa478dfSRob Herring * @page: page that buffer resides in 15900fa478dfSRob Herring * @offset: offset into page for start of buffer 15910fa478dfSRob Herring * @size: size of buffer to map 15920fa478dfSRob Herring * @dir: DMA transfer direction 15930fa478dfSRob Herring * 15940fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_map_page() 15950fa478dfSRob Herring */ 15960fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 15970fa478dfSRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 15980fa478dfSRob Herring struct dma_attrs *attrs) 15990fa478dfSRob Herring { 16000fa478dfSRob Herring struct dma_iommu_mapping *mapping = dev->archdata.mapping; 16010fa478dfSRob Herring dma_addr_t dma_addr; 16020fa478dfSRob Herring int ret, len = PAGE_ALIGN(size + offset); 16030fa478dfSRob Herring 16040fa478dfSRob Herring dma_addr = __alloc_iova(mapping, len); 16050fa478dfSRob Herring if (dma_addr == 
DMA_ERROR_CODE) 16060fa478dfSRob Herring return dma_addr; 16070fa478dfSRob Herring 16080fa478dfSRob Herring ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0); 16090fa478dfSRob Herring if (ret < 0) 16100fa478dfSRob Herring goto fail; 16110fa478dfSRob Herring 16120fa478dfSRob Herring return dma_addr + offset; 16130fa478dfSRob Herring fail: 16140fa478dfSRob Herring __free_iova(mapping, dma_addr, len); 16150fa478dfSRob Herring return DMA_ERROR_CODE; 16160fa478dfSRob Herring } 16170fa478dfSRob Herring 16180fa478dfSRob Herring /** 16194ce63fcdSMarek Szyprowski * arm_iommu_map_page 16204ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 16214ce63fcdSMarek Szyprowski * @page: page that buffer resides in 16224ce63fcdSMarek Szyprowski * @offset: offset into page for start of buffer 16234ce63fcdSMarek Szyprowski * @size: size of buffer to map 16244ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 16254ce63fcdSMarek Szyprowski * 16264ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_map_page() 16274ce63fcdSMarek Szyprowski */ 16284ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 16294ce63fcdSMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 16304ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 16314ce63fcdSMarek Szyprowski { 16320fa478dfSRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 16334ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 16344ce63fcdSMarek Szyprowski 16350fa478dfSRob Herring return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); 16360fa478dfSRob Herring } 16374ce63fcdSMarek Szyprowski 16380fa478dfSRob Herring /** 16390fa478dfSRob Herring * arm_coherent_iommu_unmap_page 16400fa478dfSRob Herring * @dev: valid struct device pointer 16410fa478dfSRob Herring * @handle: DMA address of buffer 16420fa478dfSRob Herring * @size: size of buffer (same as passed to dma_map_page) 16430fa478dfSRob Herring * @dir: DMA transfer direction (same as passed to dma_map_page) 16440fa478dfSRob Herring * 16450fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_unmap_page() 16460fa478dfSRob Herring */ 16470fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 16480fa478dfSRob Herring size_t size, enum dma_data_direction dir, 16490fa478dfSRob Herring struct dma_attrs *attrs) 16500fa478dfSRob Herring { 16510fa478dfSRob Herring struct dma_iommu_mapping *mapping = dev->archdata.mapping; 16520fa478dfSRob Herring dma_addr_t iova = handle & PAGE_MASK; 16530fa478dfSRob Herring int offset = handle & ~PAGE_MASK; 16540fa478dfSRob Herring int len = PAGE_ALIGN(size + offset); 16554ce63fcdSMarek Szyprowski 16560fa478dfSRob Herring if (!iova) 16570fa478dfSRob Herring return; 16580fa478dfSRob Herring 16590fa478dfSRob Herring iommu_unmap(mapping->domain, iova, len); 16600fa478dfSRob Herring __free_iova(mapping, iova, len); 16614ce63fcdSMarek Szyprowski } 16624ce63fcdSMarek Szyprowski 16634ce63fcdSMarek Szyprowski /** 16644ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 16654ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 16664ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 16674ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 16684ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 16694ce63fcdSMarek Szyprowski * 16704ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 
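 *
 * Unless DMA_ATTR_SKIP_CPU_SYNC is set, CPU cache maintenance is
 * performed for the buffer before its IOVA range is unmapped and
 * returned to the allocator.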
16714ce63fcdSMarek Szyprowski */ 16724ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 16734ce63fcdSMarek Szyprowski size_t size, enum dma_data_direction dir, 16744ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 16754ce63fcdSMarek Szyprowski { 16764ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 16774ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 16784ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 16794ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 16804ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 16814ce63fcdSMarek Szyprowski 16824ce63fcdSMarek Szyprowski if (!iova) 16834ce63fcdSMarek Szyprowski return; 16844ce63fcdSMarek Szyprowski 16850fa478dfSRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 16864ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 16874ce63fcdSMarek Szyprowski 16884ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 16894ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 16904ce63fcdSMarek Szyprowski } 16914ce63fcdSMarek Szyprowski 16924ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 16934ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 16944ce63fcdSMarek Szyprowski { 16954ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 16964ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 16974ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 16984ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 16994ce63fcdSMarek Szyprowski 17004ce63fcdSMarek Szyprowski if (!iova) 17014ce63fcdSMarek Szyprowski return; 17024ce63fcdSMarek Szyprowski 17034ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 17044ce63fcdSMarek Szyprowski } 17054ce63fcdSMarek Szyprowski 17064ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 17074ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 17084ce63fcdSMarek Szyprowski { 17094ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 17104ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 17114ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 17124ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 17134ce63fcdSMarek Szyprowski 17144ce63fcdSMarek Szyprowski if (!iova) 17154ce63fcdSMarek Szyprowski return; 17164ce63fcdSMarek Szyprowski 17174ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 17184ce63fcdSMarek Szyprowski } 17194ce63fcdSMarek Szyprowski 17204ce63fcdSMarek Szyprowski struct dma_map_ops iommu_ops = { 17214ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 17224ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 17234ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 1724dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 17254ce63fcdSMarek Szyprowski 17264ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 17274ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 17284ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 17294ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device, 17304ce63fcdSMarek Szyprowski 
17314ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 17324ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 17334ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 17344ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 1735d09e1333SHiroshi Doyu 1736d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask, 17374ce63fcdSMarek Szyprowski }; 17384ce63fcdSMarek Szyprowski 17390fa478dfSRob Herring struct dma_map_ops iommu_coherent_ops = { 17400fa478dfSRob Herring .alloc = arm_iommu_alloc_attrs, 17410fa478dfSRob Herring .free = arm_iommu_free_attrs, 17420fa478dfSRob Herring .mmap = arm_iommu_mmap_attrs, 17430fa478dfSRob Herring .get_sgtable = arm_iommu_get_sgtable, 17440fa478dfSRob Herring 17450fa478dfSRob Herring .map_page = arm_coherent_iommu_map_page, 17460fa478dfSRob Herring .unmap_page = arm_coherent_iommu_unmap_page, 17470fa478dfSRob Herring 17480fa478dfSRob Herring .map_sg = arm_coherent_iommu_map_sg, 17490fa478dfSRob Herring .unmap_sg = arm_coherent_iommu_unmap_sg, 1750d09e1333SHiroshi Doyu 1751d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask, 17520fa478dfSRob Herring }; 17530fa478dfSRob Herring 17544ce63fcdSMarek Szyprowski /** 17554ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 17564ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 17574ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 17584ce63fcdSMarek Szyprowski * @size: size of the valid IO address space 17594ce63fcdSMarek Szyprowski * @order: granularity of the IO address allocations (in units of 2^order pages) 17604ce63fcdSMarek Szyprowski * 17614ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 17624ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 17634ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 17644ce63fcdSMarek Szyprowski * 17654ce63fcdSMarek Szyprowski * The client device needs to be attached to the mapping with 17664ce63fcdSMarek Szyprowski * the arm_iommu_attach_device() function.
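 *
 * Example (hypothetical base/size values; error handling abbreviated):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M, 0);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);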
17674ce63fcdSMarek Szyprowski */ 17684ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 17694ce63fcdSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, 17704ce63fcdSMarek Szyprowski int order) 17714ce63fcdSMarek Szyprowski { 17724ce63fcdSMarek Szyprowski unsigned int count = size >> (PAGE_SHIFT + order); 17734ce63fcdSMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); 17744ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 17754ce63fcdSMarek Szyprowski int err = -ENOMEM; 17764ce63fcdSMarek Szyprowski 17774ce63fcdSMarek Szyprowski if (!count) 17784ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 17794ce63fcdSMarek Szyprowski 17804ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 17814ce63fcdSMarek Szyprowski if (!mapping) 17824ce63fcdSMarek Szyprowski goto err; 17834ce63fcdSMarek Szyprowski 17844ce63fcdSMarek Szyprowski mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 17854ce63fcdSMarek Szyprowski if (!mapping->bitmap) 17864ce63fcdSMarek Szyprowski goto err2; 17874ce63fcdSMarek Szyprowski 17884ce63fcdSMarek Szyprowski mapping->base = base; 17894ce63fcdSMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 17904ce63fcdSMarek Szyprowski mapping->order = order; 17914ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 17924ce63fcdSMarek Szyprowski 17934ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 17944ce63fcdSMarek Szyprowski if (!mapping->domain) 17954ce63fcdSMarek Szyprowski goto err3; 17964ce63fcdSMarek Szyprowski 17974ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 17984ce63fcdSMarek Szyprowski return mapping; 17994ce63fcdSMarek Szyprowski err3: 18004ce63fcdSMarek Szyprowski kfree(mapping->bitmap); 18014ce63fcdSMarek Szyprowski err2: 18024ce63fcdSMarek Szyprowski kfree(mapping); 18034ce63fcdSMarek Szyprowski err: 18044ce63fcdSMarek Szyprowski return ERR_PTR(err); 18054ce63fcdSMarek Szyprowski } 18064ce63fcdSMarek Szyprowski 18074ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 18084ce63fcdSMarek Szyprowski { 18094ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 18104ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 18114ce63fcdSMarek Szyprowski 18124ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 18134ce63fcdSMarek Szyprowski kfree(mapping->bitmap); 18144ce63fcdSMarek Szyprowski kfree(mapping); 18154ce63fcdSMarek Szyprowski } 18164ce63fcdSMarek Szyprowski 18174ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 18184ce63fcdSMarek Szyprowski { 18194ce63fcdSMarek Szyprowski if (mapping) 18204ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping); 18214ce63fcdSMarek Szyprowski } 18224ce63fcdSMarek Szyprowski 18234ce63fcdSMarek Szyprowski /** 18244ce63fcdSMarek Szyprowski * arm_iommu_attach_device 18254ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18264ce63fcdSMarek Szyprowski * @mapping: io address space mapping structure (returned from 18274ce63fcdSMarek Szyprowski * arm_iommu_create_mapping) 18284ce63fcdSMarek Szyprowski * 18294ce63fcdSMarek Szyprowski * Attaches the specified io address space mapping to the provided device; 18304ce63fcdSMarek Szyprowski * this replaces the dma operations (dma_map_ops pointer) with the 18314ce63fcdSMarek Szyprowski * IOMMU aware version. More than one client might be attached to 18324ce63fcdSMarek Szyprowski * the same io address space mapping.
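 *
 * Example (hypothetical continuation of the arm_iommu_create_mapping()
 * example above):
 *
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err) {
 *		arm_iommu_release_mapping(mapping);
 *		return err;
 *	}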
18334ce63fcdSMarek Szyprowski */ 18344ce63fcdSMarek Szyprowski int arm_iommu_attach_device(struct device *dev, 18354ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping) 18364ce63fcdSMarek Szyprowski { 18374ce63fcdSMarek Szyprowski int err; 18384ce63fcdSMarek Szyprowski 18394ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev); 18404ce63fcdSMarek Szyprowski if (err) 18414ce63fcdSMarek Szyprowski return err; 18424ce63fcdSMarek Szyprowski 18434ce63fcdSMarek Szyprowski kref_get(&mapping->kref); 18444ce63fcdSMarek Szyprowski dev->archdata.mapping = mapping; 18454ce63fcdSMarek Szyprowski set_dma_ops(dev, &iommu_ops); 18464ce63fcdSMarek Szyprowski 184775c59716SHiroshi Doyu pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 18484ce63fcdSMarek Szyprowski return 0; 18494ce63fcdSMarek Szyprowski } 18504ce63fcdSMarek Szyprowski 18514ce63fcdSMarek Szyprowski #endif
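/*
 * Usage sketch (not part of this file; names and values are made up):
 * once a device has been attached via arm_iommu_attach_device(), the
 * generic DMA API dispatches to the iommu_ops defined above, e.g.:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 *	buf = dma_alloc_attrs(dev, SZ_1M, &handle, GFP_KERNEL, &attrs);
 *
 * With DMA_ATTR_NO_KERNEL_MAPPING, arm_iommu_alloc_attrs() returns the
 * page array itself rather than a kernel virtual address, so 'buf' may
 * only be passed back to dma_mmap_attrs()/dma_free_attrs().
 */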