1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 20ddbccd1SRussell King /* 30ddbccd1SRussell King * linux/arch/arm/mm/dma-mapping.c 40ddbccd1SRussell King * 50ddbccd1SRussell King * Copyright (C) 2000-2004 Russell King 60ddbccd1SRussell King * 70ddbccd1SRussell King * DMA uncached mapping support. 80ddbccd1SRussell King */ 90ddbccd1SRussell King #include <linux/module.h> 100ddbccd1SRussell King #include <linux/mm.h> 1136d0fd21SLaura Abbott #include <linux/genalloc.h> 125a0e3ad6STejun Heo #include <linux/gfp.h> 130ddbccd1SRussell King #include <linux/errno.h> 140ddbccd1SRussell King #include <linux/list.h> 150ddbccd1SRussell King #include <linux/init.h> 160ddbccd1SRussell King #include <linux/device.h> 17249baa54SChristoph Hellwig #include <linux/dma-direct.h> 180ddbccd1SRussell King #include <linux/dma-mapping.h> 19ad3c7b18SChristoph Hellwig #include <linux/dma-noncoherent.h> 20c7909509SMarek Szyprowski #include <linux/dma-contiguous.h> 2139af22a7SNicolas Pitre #include <linux/highmem.h> 22c7909509SMarek Szyprowski #include <linux/memblock.h> 2399d1717dSJon Medhurst #include <linux/slab.h> 244ce63fcdSMarek Szyprowski #include <linux/iommu.h> 25e9da6e99SMarek Szyprowski #include <linux/io.h> 264ce63fcdSMarek Szyprowski #include <linux/vmalloc.h> 27158e8bfeSAlessandro Rubini #include <linux/sizes.h> 28a254129eSJoonsoo Kim #include <linux/cma.h> 290ddbccd1SRussell King 300ddbccd1SRussell King #include <asm/memory.h> 3143377453SNicolas Pitre #include <asm/highmem.h> 320ddbccd1SRussell King #include <asm/cacheflush.h> 330ddbccd1SRussell King #include <asm/tlbflush.h> 3499d1717dSJon Medhurst #include <asm/mach/arch.h> 354ce63fcdSMarek Szyprowski #include <asm/dma-iommu.h> 36c7909509SMarek Szyprowski #include <asm/mach/map.h> 37c7909509SMarek Szyprowski #include <asm/system_info.h> 38c7909509SMarek Szyprowski #include <asm/dma-contiguous.h> 390ddbccd1SRussell King 401234e3fdSRussell King #include "dma.h" 41022ae537SRussell King #include "mm.h" 
42022ae537SRussell King 43b4268676SRabin Vincent struct arm_dma_alloc_args { 44b4268676SRabin Vincent struct device *dev; 45b4268676SRabin Vincent size_t size; 46b4268676SRabin Vincent gfp_t gfp; 47b4268676SRabin Vincent pgprot_t prot; 48b4268676SRabin Vincent const void *caller; 49b4268676SRabin Vincent bool want_vaddr; 50f1270896SGregory CLEMENT int coherent_flag; 51b4268676SRabin Vincent }; 52b4268676SRabin Vincent 53b4268676SRabin Vincent struct arm_dma_free_args { 54b4268676SRabin Vincent struct device *dev; 55b4268676SRabin Vincent size_t size; 56b4268676SRabin Vincent void *cpu_addr; 57b4268676SRabin Vincent struct page *page; 58b4268676SRabin Vincent bool want_vaddr; 59b4268676SRabin Vincent }; 60b4268676SRabin Vincent 61f1270896SGregory CLEMENT #define NORMAL 0 62f1270896SGregory CLEMENT #define COHERENT 1 63f1270896SGregory CLEMENT 64b4268676SRabin Vincent struct arm_dma_allocator { 65b4268676SRabin Vincent void *(*alloc)(struct arm_dma_alloc_args *args, 66b4268676SRabin Vincent struct page **ret_page); 67b4268676SRabin Vincent void (*free)(struct arm_dma_free_args *args); 68b4268676SRabin Vincent }; 69b4268676SRabin Vincent 7019e6e5e5SRabin Vincent struct arm_dma_buffer { 7119e6e5e5SRabin Vincent struct list_head list; 7219e6e5e5SRabin Vincent void *virt; 73b4268676SRabin Vincent struct arm_dma_allocator *allocator; 7419e6e5e5SRabin Vincent }; 7519e6e5e5SRabin Vincent 7619e6e5e5SRabin Vincent static LIST_HEAD(arm_dma_bufs); 7719e6e5e5SRabin Vincent static DEFINE_SPINLOCK(arm_dma_bufs_lock); 7819e6e5e5SRabin Vincent 7919e6e5e5SRabin Vincent static struct arm_dma_buffer *arm_dma_buffer_find(void *virt) 8019e6e5e5SRabin Vincent { 8119e6e5e5SRabin Vincent struct arm_dma_buffer *buf, *found = NULL; 8219e6e5e5SRabin Vincent unsigned long flags; 8319e6e5e5SRabin Vincent 8419e6e5e5SRabin Vincent spin_lock_irqsave(&arm_dma_bufs_lock, flags); 8519e6e5e5SRabin Vincent list_for_each_entry(buf, &arm_dma_bufs, list) { 8619e6e5e5SRabin Vincent if (buf->virt == virt) { 
8719e6e5e5SRabin Vincent list_del(&buf->list); 8819e6e5e5SRabin Vincent found = buf; 8919e6e5e5SRabin Vincent break; 9019e6e5e5SRabin Vincent } 9119e6e5e5SRabin Vincent } 9219e6e5e5SRabin Vincent spin_unlock_irqrestore(&arm_dma_bufs_lock, flags); 9319e6e5e5SRabin Vincent return found; 9419e6e5e5SRabin Vincent } 9519e6e5e5SRabin Vincent 9615237e1fSMarek Szyprowski /* 9715237e1fSMarek Szyprowski * The DMA API is built upon the notion of "buffer ownership". A buffer 9815237e1fSMarek Szyprowski * is either exclusively owned by the CPU (and therefore may be accessed 9915237e1fSMarek Szyprowski * by it) or exclusively owned by the DMA device. These helper functions 10015237e1fSMarek Szyprowski * represent the transitions between these two ownership states. 10115237e1fSMarek Szyprowski * 10215237e1fSMarek Szyprowski * Note, however, that on later ARMs, this notion does not work due to 10315237e1fSMarek Szyprowski * speculative prefetches. We model our approach on the assumption that 10415237e1fSMarek Szyprowski * the CPU does do speculative prefetches, which means we clean caches 10515237e1fSMarek Szyprowski * before transfers and delay cache invalidation until transfer completion. 
10615237e1fSMarek Szyprowski * 10715237e1fSMarek Szyprowski */ 10851fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *, unsigned long, 10915237e1fSMarek Szyprowski size_t, enum dma_data_direction); 11051fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *, unsigned long, 11115237e1fSMarek Szyprowski size_t, enum dma_data_direction); 11215237e1fSMarek Szyprowski 1132dc6a016SMarek Szyprowski /** 1142dc6a016SMarek Szyprowski * arm_dma_map_page - map a portion of a page for streaming DMA 1152dc6a016SMarek Szyprowski * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1162dc6a016SMarek Szyprowski * @page: page that buffer resides in 1172dc6a016SMarek Szyprowski * @offset: offset into page for start of buffer 1182dc6a016SMarek Szyprowski * @size: size of buffer to map 1192dc6a016SMarek Szyprowski * @dir: DMA transfer direction 1202dc6a016SMarek Szyprowski * 1212dc6a016SMarek Szyprowski * Ensure that any data held in the cache is appropriately discarded 1222dc6a016SMarek Szyprowski * or written back. 1232dc6a016SMarek Szyprowski * 1242dc6a016SMarek Szyprowski * The device owns this memory once this call has completed. The CPU 1252dc6a016SMarek Szyprowski * can regain ownership by calling dma_unmap_page(). 
1262dc6a016SMarek Szyprowski */ 12751fde349SMarek Szyprowski static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, 1282dc6a016SMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 12900085f1eSKrzysztof Kozlowski unsigned long attrs) 1302dc6a016SMarek Szyprowski { 13100085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 13251fde349SMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 13351fde349SMarek Szyprowski return pfn_to_dma(dev, page_to_pfn(page)) + offset; 1342dc6a016SMarek Szyprowski } 1352dc6a016SMarek Szyprowski 136dd37e940SRob Herring static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, 137dd37e940SRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 13800085f1eSKrzysztof Kozlowski unsigned long attrs) 139dd37e940SRob Herring { 140dd37e940SRob Herring return pfn_to_dma(dev, page_to_pfn(page)) + offset; 141dd37e940SRob Herring } 142dd37e940SRob Herring 1432dc6a016SMarek Szyprowski /** 1442dc6a016SMarek Szyprowski * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() 1452dc6a016SMarek Szyprowski * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1462dc6a016SMarek Szyprowski * @handle: DMA address of buffer 1472dc6a016SMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 1482dc6a016SMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 1492dc6a016SMarek Szyprowski * 1502dc6a016SMarek Szyprowski * Unmap a page streaming mode DMA translation. The handle and size 1512dc6a016SMarek Szyprowski * must match what was provided in the previous dma_map_page() call. 1522dc6a016SMarek Szyprowski * All other usages are undefined. 1532dc6a016SMarek Szyprowski * 1542dc6a016SMarek Szyprowski * After this call, reads by the CPU to the buffer are guaranteed to see 1552dc6a016SMarek Szyprowski * whatever the device wrote there. 
1562dc6a016SMarek Szyprowski */ 15751fde349SMarek Szyprowski static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, 15800085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 1592dc6a016SMarek Szyprowski { 16000085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 16151fde349SMarek Szyprowski __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), 16251fde349SMarek Szyprowski handle & ~PAGE_MASK, size, dir); 1632dc6a016SMarek Szyprowski } 1642dc6a016SMarek Szyprowski 16551fde349SMarek Szyprowski static void arm_dma_sync_single_for_cpu(struct device *dev, 1662dc6a016SMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 1672dc6a016SMarek Szyprowski { 1682dc6a016SMarek Szyprowski unsigned int offset = handle & (PAGE_SIZE - 1); 1692dc6a016SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); 1702dc6a016SMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 1712dc6a016SMarek Szyprowski } 1722dc6a016SMarek Szyprowski 17351fde349SMarek Szyprowski static void arm_dma_sync_single_for_device(struct device *dev, 1742dc6a016SMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 1752dc6a016SMarek Szyprowski { 1762dc6a016SMarek Szyprowski unsigned int offset = handle & (PAGE_SIZE - 1); 1772dc6a016SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); 1782dc6a016SMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 1792dc6a016SMarek Szyprowski } 1802dc6a016SMarek Szyprowski 1815299709dSBart Van Assche const struct dma_map_ops arm_dma_ops = { 182f99d6034SMarek Szyprowski .alloc = arm_dma_alloc, 183f99d6034SMarek Szyprowski .free = arm_dma_free, 184f99d6034SMarek Szyprowski .mmap = arm_dma_mmap, 185dc2832e1SMarek Szyprowski .get_sgtable = arm_dma_get_sgtable, 1862dc6a016SMarek Szyprowski .map_page = arm_dma_map_page, 1872dc6a016SMarek Szyprowski .unmap_page = arm_dma_unmap_page, 
1882dc6a016SMarek Szyprowski .map_sg = arm_dma_map_sg, 1892dc6a016SMarek Szyprowski .unmap_sg = arm_dma_unmap_sg, 190cfced786SChristoph Hellwig .map_resource = dma_direct_map_resource, 1912dc6a016SMarek Szyprowski .sync_single_for_cpu = arm_dma_sync_single_for_cpu, 1922dc6a016SMarek Szyprowski .sync_single_for_device = arm_dma_sync_single_for_device, 1932dc6a016SMarek Szyprowski .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, 1942dc6a016SMarek Szyprowski .sync_sg_for_device = arm_dma_sync_sg_for_device, 195418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 196249baa54SChristoph Hellwig .get_required_mask = dma_direct_get_required_mask, 1972dc6a016SMarek Szyprowski }; 1982dc6a016SMarek Szyprowski EXPORT_SYMBOL(arm_dma_ops); 1992dc6a016SMarek Szyprowski 200dd37e940SRob Herring static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 20100085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs); 202dd37e940SRob Herring static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 20300085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs); 20455af8a91SMike Looijmans static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, 20555af8a91SMike Looijmans void *cpu_addr, dma_addr_t dma_addr, size_t size, 20600085f1eSKrzysztof Kozlowski unsigned long attrs); 207dd37e940SRob Herring 2085299709dSBart Van Assche const struct dma_map_ops arm_coherent_dma_ops = { 209dd37e940SRob Herring .alloc = arm_coherent_dma_alloc, 210dd37e940SRob Herring .free = arm_coherent_dma_free, 21155af8a91SMike Looijmans .mmap = arm_coherent_dma_mmap, 212dd37e940SRob Herring .get_sgtable = arm_dma_get_sgtable, 213dd37e940SRob Herring .map_page = arm_coherent_dma_map_page, 214dd37e940SRob Herring .map_sg = arm_dma_map_sg, 215cfced786SChristoph Hellwig .map_resource = dma_direct_map_resource, 216418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 217249baa54SChristoph Hellwig .get_required_mask = 
dma_direct_get_required_mask, 218dd37e940SRob Herring }; 219dd37e940SRob Herring EXPORT_SYMBOL(arm_coherent_dma_ops); 220dd37e940SRob Herring 2219f28cde0SRussell King static int __dma_supported(struct device *dev, u64 mask, bool warn) 2229f28cde0SRussell King { 223ab746573SChristoph Hellwig unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); 2249f28cde0SRussell King 2259f28cde0SRussell King /* 2269f28cde0SRussell King * Translate the device's DMA mask to a PFN limit. This 2279f28cde0SRussell King * PFN number includes the page which we can DMA to. 2289f28cde0SRussell King */ 2299f28cde0SRussell King if (dma_to_pfn(dev, mask) < max_dma_pfn) { 2309f28cde0SRussell King if (warn) 2319f28cde0SRussell King dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", 2329f28cde0SRussell King mask, 2339f28cde0SRussell King dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, 2349f28cde0SRussell King max_dma_pfn + 1); 2359f28cde0SRussell King return 0; 2369f28cde0SRussell King } 2379f28cde0SRussell King 2389f28cde0SRussell King return 1; 2399f28cde0SRussell King } 2409f28cde0SRussell King 241ab6494f0SCatalin Marinas static u64 get_coherent_dma_mask(struct device *dev) 242ab6494f0SCatalin Marinas { 2434dcfa600SRussell King u64 mask = (u64)DMA_BIT_MASK(32); 2440ddbccd1SRussell King 245ab6494f0SCatalin Marinas if (dev) { 246ab6494f0SCatalin Marinas mask = dev->coherent_dma_mask; 247ab6494f0SCatalin Marinas 248ab6494f0SCatalin Marinas /* 249ab6494f0SCatalin Marinas * Sanity check the DMA mask - it must be non-zero, and 250ab6494f0SCatalin Marinas * must be able to be satisfied by a DMA allocation. 
251ab6494f0SCatalin Marinas */ 252ab6494f0SCatalin Marinas if (mask == 0) { 253ab6494f0SCatalin Marinas dev_warn(dev, "coherent DMA mask is unset\n"); 254ab6494f0SCatalin Marinas return 0; 255ab6494f0SCatalin Marinas } 256ab6494f0SCatalin Marinas 2579f28cde0SRussell King if (!__dma_supported(dev, mask, true)) 2584dcfa600SRussell King return 0; 2594dcfa600SRussell King } 2604dcfa600SRussell King 261ab6494f0SCatalin Marinas return mask; 262ab6494f0SCatalin Marinas } 263ab6494f0SCatalin Marinas 264f1270896SGregory CLEMENT static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) 265c7909509SMarek Szyprowski { 266c7909509SMarek Szyprowski /* 267c7909509SMarek Szyprowski * Ensure that the allocated pages are zeroed, and that any data 268c7909509SMarek Szyprowski * lurking in the kernel direct-mapped region is invalidated. 269c7909509SMarek Szyprowski */ 2709848e48fSMarek Szyprowski if (PageHighMem(page)) { 2719848e48fSMarek Szyprowski phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); 2729848e48fSMarek Szyprowski phys_addr_t end = base + size; 2739848e48fSMarek Szyprowski while (size > 0) { 2749848e48fSMarek Szyprowski void *ptr = kmap_atomic(page); 2759848e48fSMarek Szyprowski memset(ptr, 0, PAGE_SIZE); 276f1270896SGregory CLEMENT if (coherent_flag != COHERENT) 2779848e48fSMarek Szyprowski dmac_flush_range(ptr, ptr + PAGE_SIZE); 2789848e48fSMarek Szyprowski kunmap_atomic(ptr); 2799848e48fSMarek Szyprowski page++; 2809848e48fSMarek Szyprowski size -= PAGE_SIZE; 2819848e48fSMarek Szyprowski } 282f1270896SGregory CLEMENT if (coherent_flag != COHERENT) 2839848e48fSMarek Szyprowski outer_flush_range(base, end); 2849848e48fSMarek Szyprowski } else { 2859848e48fSMarek Szyprowski void *ptr = page_address(page); 286c7909509SMarek Szyprowski memset(ptr, 0, size); 287f1270896SGregory CLEMENT if (coherent_flag != COHERENT) { 288c7909509SMarek Szyprowski dmac_flush_range(ptr, ptr + size); 289c7909509SMarek Szyprowski outer_flush_range(__pa(ptr), __pa(ptr) 
+ size); 290c7909509SMarek Szyprowski } 2914ce63fcdSMarek Szyprowski } 292f1270896SGregory CLEMENT } 293c7909509SMarek Szyprowski 2947a9a32a9SRussell King /* 2957a9a32a9SRussell King * Allocate a DMA buffer for 'dev' of size 'size' using the 2967a9a32a9SRussell King * specified gfp mask. Note that 'size' must be page aligned. 2977a9a32a9SRussell King */ 298f1270896SGregory CLEMENT static struct page *__dma_alloc_buffer(struct device *dev, size_t size, 299f1270896SGregory CLEMENT gfp_t gfp, int coherent_flag) 3007a9a32a9SRussell King { 3017a9a32a9SRussell King unsigned long order = get_order(size); 3027a9a32a9SRussell King struct page *page, *p, *e; 3037a9a32a9SRussell King 3047a9a32a9SRussell King page = alloc_pages(gfp, order); 3057a9a32a9SRussell King if (!page) 3067a9a32a9SRussell King return NULL; 3077a9a32a9SRussell King 3087a9a32a9SRussell King /* 3097a9a32a9SRussell King * Now split the huge page and free the excess pages 3107a9a32a9SRussell King */ 3117a9a32a9SRussell King split_page(page, order); 3127a9a32a9SRussell King for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) 3137a9a32a9SRussell King __free_page(p); 3147a9a32a9SRussell King 315f1270896SGregory CLEMENT __dma_clear_buffer(page, size, coherent_flag); 3167a9a32a9SRussell King 3177a9a32a9SRussell King return page; 3187a9a32a9SRussell King } 3197a9a32a9SRussell King 3207a9a32a9SRussell King /* 3217a9a32a9SRussell King * Free a DMA buffer. 'size' must be page aligned. 
3227a9a32a9SRussell King */ 3237a9a32a9SRussell King static void __dma_free_buffer(struct page *page, size_t size) 3247a9a32a9SRussell King { 3257a9a32a9SRussell King struct page *e = page + (size >> PAGE_SHIFT); 3267a9a32a9SRussell King 3277a9a32a9SRussell King while (page < e) { 3287a9a32a9SRussell King __free_page(page); 3297a9a32a9SRussell King page++; 3307a9a32a9SRussell King } 3317a9a32a9SRussell King } 3327a9a32a9SRussell King 333c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size, 3349848e48fSMarek Szyprowski pgprot_t prot, struct page **ret_page, 335f1270896SGregory CLEMENT const void *caller, bool want_vaddr, 336712c604dSLucas Stach int coherent_flag, gfp_t gfp); 337c7909509SMarek Szyprowski 338e9da6e99SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 339e9da6e99SMarek Szyprowski pgprot_t prot, struct page **ret_page, 3406e8266e3SCarlo Caione const void *caller, bool want_vaddr); 341e9da6e99SMarek Szyprowski 3426e5267aaSMarek Szyprowski #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K 343b337e1c4SVladimir Murzin static struct gen_pool *atomic_pool __ro_after_init; 3446e5267aaSMarek Szyprowski 345b337e1c4SVladimir Murzin static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE; 346c7909509SMarek Szyprowski 347c7909509SMarek Szyprowski static int __init early_coherent_pool(char *p) 348c7909509SMarek Szyprowski { 34936d0fd21SLaura Abbott atomic_pool_size = memparse(p, &p); 350c7909509SMarek Szyprowski return 0; 351c7909509SMarek Szyprowski } 352c7909509SMarek Szyprowski early_param("coherent_pool", early_coherent_pool); 353c7909509SMarek Szyprowski 354c7909509SMarek Szyprowski /* 355c7909509SMarek Szyprowski * Initialise the coherent pool for atomic allocations. 
356c7909509SMarek Szyprowski */ 357e9da6e99SMarek Szyprowski static int __init atomic_pool_init(void) 358c7909509SMarek Szyprowski { 35971b55663SRussell King pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL); 3609d1400cfSMarek Szyprowski gfp_t gfp = GFP_KERNEL | GFP_DMA; 361c7909509SMarek Szyprowski struct page *page; 362c7909509SMarek Szyprowski void *ptr; 363c7909509SMarek Szyprowski 36436d0fd21SLaura Abbott atomic_pool = gen_pool_create(PAGE_SHIFT, -1); 36536d0fd21SLaura Abbott if (!atomic_pool) 36636d0fd21SLaura Abbott goto out; 367f1270896SGregory CLEMENT /* 368f1270896SGregory CLEMENT * The atomic pool is only used for non-coherent allocations 369f1270896SGregory CLEMENT * so we must pass NORMAL for coherent_flag. 370f1270896SGregory CLEMENT */ 371e464ef16SGioh Kim if (dev_get_cma_area(NULL)) 37236d0fd21SLaura Abbott ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot, 373712c604dSLucas Stach &page, atomic_pool_init, true, NORMAL, 374712c604dSLucas Stach GFP_KERNEL); 375e9da6e99SMarek Szyprowski else 37636d0fd21SLaura Abbott ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot, 3776e8266e3SCarlo Caione &page, atomic_pool_init, true); 378c7909509SMarek Szyprowski if (ptr) { 37936d0fd21SLaura Abbott int ret; 3806b3fe472SHiroshi Doyu 38136d0fd21SLaura Abbott ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr, 38236d0fd21SLaura Abbott page_to_phys(page), 38336d0fd21SLaura Abbott atomic_pool_size, -1); 38436d0fd21SLaura Abbott if (ret) 38536d0fd21SLaura Abbott goto destroy_genpool; 3866b3fe472SHiroshi Doyu 38736d0fd21SLaura Abbott gen_pool_set_algo(atomic_pool, 38836d0fd21SLaura Abbott gen_pool_first_fit_order_align, 389acb62448SVladimir Murzin NULL); 390bf31c5e0SFabio Estevam pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n", 39136d0fd21SLaura Abbott atomic_pool_size / 1024); 392c7909509SMarek Szyprowski return 0; 393c7909509SMarek Szyprowski } 394ec10665cSSachin Kamat 39536d0fd21SLaura Abbott destroy_genpool: 
39636d0fd21SLaura Abbott gen_pool_destroy(atomic_pool); 39736d0fd21SLaura Abbott atomic_pool = NULL; 39836d0fd21SLaura Abbott out: 399bf31c5e0SFabio Estevam pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", 40036d0fd21SLaura Abbott atomic_pool_size / 1024); 401c7909509SMarek Szyprowski return -ENOMEM; 402c7909509SMarek Szyprowski } 403c7909509SMarek Szyprowski /* 404c7909509SMarek Szyprowski * CMA is activated by core_initcall, so we must be called after it. 405c7909509SMarek Szyprowski */ 406e9da6e99SMarek Szyprowski postcore_initcall(atomic_pool_init); 407c7909509SMarek Szyprowski 408c7909509SMarek Szyprowski struct dma_contig_early_reserve { 409c7909509SMarek Szyprowski phys_addr_t base; 410c7909509SMarek Szyprowski unsigned long size; 411c7909509SMarek Szyprowski }; 412c7909509SMarek Szyprowski 413c7909509SMarek Szyprowski static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata; 414c7909509SMarek Szyprowski 415c7909509SMarek Szyprowski static int dma_mmu_remap_num __initdata; 416c7909509SMarek Szyprowski 417c7909509SMarek Szyprowski void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) 418c7909509SMarek Szyprowski { 419c7909509SMarek Szyprowski dma_mmu_remap[dma_mmu_remap_num].base = base; 420c7909509SMarek Szyprowski dma_mmu_remap[dma_mmu_remap_num].size = size; 421c7909509SMarek Szyprowski dma_mmu_remap_num++; 422c7909509SMarek Szyprowski } 423c7909509SMarek Szyprowski 424c7909509SMarek Szyprowski void __init dma_contiguous_remap(void) 425c7909509SMarek Szyprowski { 426c7909509SMarek Szyprowski int i; 427c7909509SMarek Szyprowski for (i = 0; i < dma_mmu_remap_num; i++) { 428c7909509SMarek Szyprowski phys_addr_t start = dma_mmu_remap[i].base; 429c7909509SMarek Szyprowski phys_addr_t end = start + dma_mmu_remap[i].size; 430c7909509SMarek Szyprowski struct map_desc map; 431c7909509SMarek Szyprowski unsigned long addr; 432c7909509SMarek Szyprowski 433c7909509SMarek Szyprowski if (end > 
arm_lowmem_limit) 434c7909509SMarek Szyprowski end = arm_lowmem_limit; 435c7909509SMarek Szyprowski if (start >= end) 43639f78e70SChris Brand continue; 437c7909509SMarek Szyprowski 438c7909509SMarek Szyprowski map.pfn = __phys_to_pfn(start); 439c7909509SMarek Szyprowski map.virtual = __phys_to_virt(start); 440c7909509SMarek Szyprowski map.length = end - start; 441c7909509SMarek Szyprowski map.type = MT_MEMORY_DMA_READY; 442c7909509SMarek Szyprowski 443c7909509SMarek Szyprowski /* 4446b076991SRussell King * Clear previous low-memory mapping to ensure that the 4456b076991SRussell King * TLB does not see any conflicting entries, then flush 4466b076991SRussell King * the TLB of the old entries before creating new mappings. 4476b076991SRussell King * 4486b076991SRussell King * This ensures that any speculatively loaded TLB entries 4496b076991SRussell King * (even though they may be rare) can not cause any problems, 4506b076991SRussell King * and ensures that this code is architecturally compliant. 
451c7909509SMarek Szyprowski */ 452c7909509SMarek Szyprowski for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); 45361f6c7a4SVitaly Andrianov addr += PMD_SIZE) 454c7909509SMarek Szyprowski pmd_clear(pmd_off_k(addr)); 455c7909509SMarek Szyprowski 4566b076991SRussell King flush_tlb_kernel_range(__phys_to_virt(start), 4576b076991SRussell King __phys_to_virt(end)); 4586b076991SRussell King 459c7909509SMarek Szyprowski iotable_init(&map, 1); 460c7909509SMarek Szyprowski } 461c7909509SMarek Szyprowski } 462c7909509SMarek Szyprowski 4638b1e0f81SAnshuman Khandual static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data) 464c7909509SMarek Szyprowski { 465c7909509SMarek Szyprowski struct page *page = virt_to_page(addr); 466c7909509SMarek Szyprowski pgprot_t prot = *(pgprot_t *)data; 467c7909509SMarek Szyprowski 468c7909509SMarek Szyprowski set_pte_ext(pte, mk_pte(page, prot), 0); 469c7909509SMarek Szyprowski return 0; 470c7909509SMarek Szyprowski } 471c7909509SMarek Szyprowski 472c7909509SMarek Szyprowski static void __dma_remap(struct page *page, size_t size, pgprot_t prot) 473c7909509SMarek Szyprowski { 474c7909509SMarek Szyprowski unsigned long start = (unsigned long) page_address(page); 475c7909509SMarek Szyprowski unsigned end = start + size; 476c7909509SMarek Szyprowski 477c7909509SMarek Szyprowski apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); 478c7909509SMarek Szyprowski flush_tlb_kernel_range(start, end); 479c7909509SMarek Szyprowski } 480c7909509SMarek Szyprowski 481c7909509SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 482c7909509SMarek Szyprowski pgprot_t prot, struct page **ret_page, 4836e8266e3SCarlo Caione const void *caller, bool want_vaddr) 484c7909509SMarek Szyprowski { 485c7909509SMarek Szyprowski struct page *page; 4866e8266e3SCarlo Caione void *ptr = NULL; 487f1270896SGregory CLEMENT /* 488f1270896SGregory CLEMENT * __alloc_remap_buffer is only called when the 
device is 489f1270896SGregory CLEMENT * non-coherent 490f1270896SGregory CLEMENT */ 491f1270896SGregory CLEMENT page = __dma_alloc_buffer(dev, size, gfp, NORMAL); 492c7909509SMarek Szyprowski if (!page) 493c7909509SMarek Szyprowski return NULL; 4946e8266e3SCarlo Caione if (!want_vaddr) 4956e8266e3SCarlo Caione goto out; 496c7909509SMarek Szyprowski 49778406ff5SChristoph Hellwig ptr = dma_common_contiguous_remap(page, size, prot, caller); 498c7909509SMarek Szyprowski if (!ptr) { 499c7909509SMarek Szyprowski __dma_free_buffer(page, size); 500c7909509SMarek Szyprowski return NULL; 501c7909509SMarek Szyprowski } 502c7909509SMarek Szyprowski 5036e8266e3SCarlo Caione out: 504c7909509SMarek Szyprowski *ret_page = page; 505c7909509SMarek Szyprowski return ptr; 506c7909509SMarek Szyprowski } 507c7909509SMarek Szyprowski 508e9da6e99SMarek Szyprowski static void *__alloc_from_pool(size_t size, struct page **ret_page) 509c7909509SMarek Szyprowski { 51036d0fd21SLaura Abbott unsigned long val; 511e9da6e99SMarek Szyprowski void *ptr = NULL; 512c7909509SMarek Szyprowski 51336d0fd21SLaura Abbott if (!atomic_pool) { 514e9da6e99SMarek Szyprowski WARN(1, "coherent pool not initialised!\n"); 515c7909509SMarek Szyprowski return NULL; 516c7909509SMarek Szyprowski } 517c7909509SMarek Szyprowski 51836d0fd21SLaura Abbott val = gen_pool_alloc(atomic_pool, size); 51936d0fd21SLaura Abbott if (val) { 52036d0fd21SLaura Abbott phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); 521e9da6e99SMarek Szyprowski 52236d0fd21SLaura Abbott *ret_page = phys_to_page(phys); 52336d0fd21SLaura Abbott ptr = (void *)val; 524e9da6e99SMarek Szyprowski } 525e9da6e99SMarek Szyprowski 526c7909509SMarek Szyprowski return ptr; 527c7909509SMarek Szyprowski } 528c7909509SMarek Szyprowski 52921d0a759SHiroshi Doyu static bool __in_atomic_pool(void *start, size_t size) 53021d0a759SHiroshi Doyu { 53136d0fd21SLaura Abbott return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); 53221d0a759SHiroshi Doyu } 
53321d0a759SHiroshi Doyu 534e9da6e99SMarek Szyprowski static int __free_from_pool(void *start, size_t size) 535c7909509SMarek Szyprowski { 53621d0a759SHiroshi Doyu if (!__in_atomic_pool(start, size)) 537c7909509SMarek Szyprowski return 0; 538c7909509SMarek Szyprowski 53936d0fd21SLaura Abbott gen_pool_free(atomic_pool, (unsigned long)start, size); 540e9da6e99SMarek Szyprowski 541c7909509SMarek Szyprowski return 1; 542c7909509SMarek Szyprowski } 543c7909509SMarek Szyprowski 544c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size, 5459848e48fSMarek Szyprowski pgprot_t prot, struct page **ret_page, 546f1270896SGregory CLEMENT const void *caller, bool want_vaddr, 547712c604dSLucas Stach int coherent_flag, gfp_t gfp) 548c7909509SMarek Szyprowski { 549c7909509SMarek Szyprowski unsigned long order = get_order(size); 550c7909509SMarek Szyprowski size_t count = size >> PAGE_SHIFT; 551c7909509SMarek Szyprowski struct page *page; 5526e8266e3SCarlo Caione void *ptr = NULL; 553c7909509SMarek Szyprowski 554d834c5abSMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN); 555c7909509SMarek Szyprowski if (!page) 556c7909509SMarek Szyprowski return NULL; 557c7909509SMarek Szyprowski 558f1270896SGregory CLEMENT __dma_clear_buffer(page, size, coherent_flag); 559c7909509SMarek Szyprowski 5606e8266e3SCarlo Caione if (!want_vaddr) 5616e8266e3SCarlo Caione goto out; 5626e8266e3SCarlo Caione 5639848e48fSMarek Szyprowski if (PageHighMem(page)) { 56478406ff5SChristoph Hellwig ptr = dma_common_contiguous_remap(page, size, prot, caller); 5659848e48fSMarek Szyprowski if (!ptr) { 5669848e48fSMarek Szyprowski dma_release_from_contiguous(dev, page, count); 5679848e48fSMarek Szyprowski return NULL; 5689848e48fSMarek Szyprowski } 5699848e48fSMarek Szyprowski } else { 5709848e48fSMarek Szyprowski __dma_remap(page, size, prot); 5719848e48fSMarek Szyprowski ptr = page_address(page); 5729848e48fSMarek Szyprowski } 
5736e8266e3SCarlo Caione 5746e8266e3SCarlo Caione out: 575c7909509SMarek Szyprowski *ret_page = page; 5769848e48fSMarek Szyprowski return ptr; 577c7909509SMarek Szyprowski } 578c7909509SMarek Szyprowski 579c7909509SMarek Szyprowski static void __free_from_contiguous(struct device *dev, struct page *page, 5806e8266e3SCarlo Caione void *cpu_addr, size_t size, bool want_vaddr) 581c7909509SMarek Szyprowski { 5826e8266e3SCarlo Caione if (want_vaddr) { 5839848e48fSMarek Szyprowski if (PageHighMem(page)) 58478406ff5SChristoph Hellwig dma_common_free_remap(cpu_addr, size); 5859848e48fSMarek Szyprowski else 58671b55663SRussell King __dma_remap(page, size, PAGE_KERNEL); 5876e8266e3SCarlo Caione } 588c7909509SMarek Szyprowski dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); 589c7909509SMarek Szyprowski } 590c7909509SMarek Szyprowski 59100085f1eSKrzysztof Kozlowski static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot) 592f99d6034SMarek Szyprowski { 59300085f1eSKrzysztof Kozlowski prot = (attrs & DMA_ATTR_WRITE_COMBINE) ? 
594f99d6034SMarek Szyprowski pgprot_writecombine(prot) : 595f99d6034SMarek Szyprowski pgprot_dmacoherent(prot); 596f99d6034SMarek Szyprowski return prot; 597f99d6034SMarek Szyprowski } 598f99d6034SMarek Szyprowski 599c7909509SMarek Szyprowski static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, 600c7909509SMarek Szyprowski struct page **ret_page) 601ab6494f0SCatalin Marinas { 60204da5694SRussell King struct page *page; 603f1270896SGregory CLEMENT /* __alloc_simple_buffer is only called when the device is coherent */ 604f1270896SGregory CLEMENT page = __dma_alloc_buffer(dev, size, gfp, COHERENT); 605c7909509SMarek Szyprowski if (!page) 606c7909509SMarek Szyprowski return NULL; 607c7909509SMarek Szyprowski 608c7909509SMarek Szyprowski *ret_page = page; 609c7909509SMarek Szyprowski return page_address(page); 610c7909509SMarek Szyprowski } 611c7909509SMarek Szyprowski 612b4268676SRabin Vincent static void *simple_allocator_alloc(struct arm_dma_alloc_args *args, 613b4268676SRabin Vincent struct page **ret_page) 614b4268676SRabin Vincent { 615b4268676SRabin Vincent return __alloc_simple_buffer(args->dev, args->size, args->gfp, 616b4268676SRabin Vincent ret_page); 617b4268676SRabin Vincent } 618c7909509SMarek Szyprowski 619b4268676SRabin Vincent static void simple_allocator_free(struct arm_dma_free_args *args) 620b4268676SRabin Vincent { 621b4268676SRabin Vincent __dma_free_buffer(args->page, args->size); 622b4268676SRabin Vincent } 623b4268676SRabin Vincent 624b4268676SRabin Vincent static struct arm_dma_allocator simple_allocator = { 625b4268676SRabin Vincent .alloc = simple_allocator_alloc, 626b4268676SRabin Vincent .free = simple_allocator_free, 627b4268676SRabin Vincent }; 628b4268676SRabin Vincent 629b4268676SRabin Vincent static void *cma_allocator_alloc(struct arm_dma_alloc_args *args, 630b4268676SRabin Vincent struct page **ret_page) 631b4268676SRabin Vincent { 632b4268676SRabin Vincent return __alloc_from_contiguous(args->dev, 
args->size, args->prot, 633b4268676SRabin Vincent ret_page, args->caller, 634712c604dSLucas Stach args->want_vaddr, args->coherent_flag, 635712c604dSLucas Stach args->gfp); 636b4268676SRabin Vincent } 637b4268676SRabin Vincent 638b4268676SRabin Vincent static void cma_allocator_free(struct arm_dma_free_args *args) 639b4268676SRabin Vincent { 640b4268676SRabin Vincent __free_from_contiguous(args->dev, args->page, args->cpu_addr, 641b4268676SRabin Vincent args->size, args->want_vaddr); 642b4268676SRabin Vincent } 643b4268676SRabin Vincent 644b4268676SRabin Vincent static struct arm_dma_allocator cma_allocator = { 645b4268676SRabin Vincent .alloc = cma_allocator_alloc, 646b4268676SRabin Vincent .free = cma_allocator_free, 647b4268676SRabin Vincent }; 648b4268676SRabin Vincent 649b4268676SRabin Vincent static void *pool_allocator_alloc(struct arm_dma_alloc_args *args, 650b4268676SRabin Vincent struct page **ret_page) 651b4268676SRabin Vincent { 652b4268676SRabin Vincent return __alloc_from_pool(args->size, ret_page); 653b4268676SRabin Vincent } 654b4268676SRabin Vincent 655b4268676SRabin Vincent static void pool_allocator_free(struct arm_dma_free_args *args) 656b4268676SRabin Vincent { 657b4268676SRabin Vincent __free_from_pool(args->cpu_addr, args->size); 658b4268676SRabin Vincent } 659b4268676SRabin Vincent 660b4268676SRabin Vincent static struct arm_dma_allocator pool_allocator = { 661b4268676SRabin Vincent .alloc = pool_allocator_alloc, 662b4268676SRabin Vincent .free = pool_allocator_free, 663b4268676SRabin Vincent }; 664b4268676SRabin Vincent 665b4268676SRabin Vincent static void *remap_allocator_alloc(struct arm_dma_alloc_args *args, 666b4268676SRabin Vincent struct page **ret_page) 667b4268676SRabin Vincent { 668b4268676SRabin Vincent return __alloc_remap_buffer(args->dev, args->size, args->gfp, 669b4268676SRabin Vincent args->prot, ret_page, args->caller, 670b4268676SRabin Vincent args->want_vaddr); 671b4268676SRabin Vincent } 672b4268676SRabin Vincent 
673b4268676SRabin Vincent static void remap_allocator_free(struct arm_dma_free_args *args) 674b4268676SRabin Vincent { 675b4268676SRabin Vincent if (args->want_vaddr) 67678406ff5SChristoph Hellwig dma_common_free_remap(args->cpu_addr, args->size); 677b4268676SRabin Vincent 678b4268676SRabin Vincent __dma_free_buffer(args->page, args->size); 679b4268676SRabin Vincent } 680b4268676SRabin Vincent 681b4268676SRabin Vincent static struct arm_dma_allocator remap_allocator = { 682b4268676SRabin Vincent .alloc = remap_allocator_alloc, 683b4268676SRabin Vincent .free = remap_allocator_free, 684b4268676SRabin Vincent }; 685c7909509SMarek Szyprowski 686c7909509SMarek Szyprowski static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 6876e8266e3SCarlo Caione gfp_t gfp, pgprot_t prot, bool is_coherent, 68800085f1eSKrzysztof Kozlowski unsigned long attrs, const void *caller) 689c7909509SMarek Szyprowski { 690c7909509SMarek Szyprowski u64 mask = get_coherent_dma_mask(dev); 6913dd7ea92SJingoo Han struct page *page = NULL; 69231ebf944SRussell King void *addr; 693b4268676SRabin Vincent bool allowblock, cma; 69419e6e5e5SRabin Vincent struct arm_dma_buffer *buf; 695b4268676SRabin Vincent struct arm_dma_alloc_args args = { 696b4268676SRabin Vincent .dev = dev, 697b4268676SRabin Vincent .size = PAGE_ALIGN(size), 698b4268676SRabin Vincent .gfp = gfp, 699b4268676SRabin Vincent .prot = prot, 700b4268676SRabin Vincent .caller = caller, 70100085f1eSKrzysztof Kozlowski .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), 702f1270896SGregory CLEMENT .coherent_flag = is_coherent ? 
COHERENT : NORMAL, 703b4268676SRabin Vincent }; 704ab6494f0SCatalin Marinas 705c7909509SMarek Szyprowski #ifdef CONFIG_DMA_API_DEBUG 706c7909509SMarek Szyprowski u64 limit = (mask + 1) & ~mask; 707c7909509SMarek Szyprowski if (limit && size >= limit) { 708c7909509SMarek Szyprowski dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", 709c7909509SMarek Szyprowski size, mask); 710c7909509SMarek Szyprowski return NULL; 711c7909509SMarek Szyprowski } 712c7909509SMarek Szyprowski #endif 713c7909509SMarek Szyprowski 714c7909509SMarek Szyprowski if (!mask) 715c7909509SMarek Szyprowski return NULL; 716c7909509SMarek Szyprowski 7179c18fcf7SAlexandre Courbot buf = kzalloc(sizeof(*buf), 7189c18fcf7SAlexandre Courbot gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)); 71919e6e5e5SRabin Vincent if (!buf) 72019e6e5e5SRabin Vincent return NULL; 72119e6e5e5SRabin Vincent 722c7909509SMarek Szyprowski if (mask < 0xffffffffULL) 723c7909509SMarek Szyprowski gfp |= GFP_DMA; 724c7909509SMarek Szyprowski 725ea2e7057SSumit Bhattacharya /* 726ea2e7057SSumit Bhattacharya * Following is a work-around (a.k.a. hack) to prevent pages 727ea2e7057SSumit Bhattacharya * with __GFP_COMP being passed to split_page() which cannot 728ea2e7057SSumit Bhattacharya * handle them. The real problem is that this flag probably 729ea2e7057SSumit Bhattacharya * should be 0 on ARM as it is not supported on this 730ea2e7057SSumit Bhattacharya * platform; see CONFIG_HUGETLBFS. 731ea2e7057SSumit Bhattacharya */ 732ea2e7057SSumit Bhattacharya gfp &= ~(__GFP_COMP); 733b4268676SRabin Vincent args.gfp = gfp; 734ea2e7057SSumit Bhattacharya 73572fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 736b4268676SRabin Vincent allowblock = gfpflags_allow_blocking(gfp); 737b4268676SRabin Vincent cma = allowblock ? 
dev_get_cma_area(dev) : false; 73804da5694SRussell King 739b4268676SRabin Vincent if (cma) 740b4268676SRabin Vincent buf->allocator = &cma_allocator; 7411655cf88SVladimir Murzin else if (is_coherent) 742b4268676SRabin Vincent buf->allocator = &simple_allocator; 743b4268676SRabin Vincent else if (allowblock) 744b4268676SRabin Vincent buf->allocator = &remap_allocator; 74531ebf944SRussell King else 746b4268676SRabin Vincent buf->allocator = &pool_allocator; 747b4268676SRabin Vincent 748b4268676SRabin Vincent addr = buf->allocator->alloc(&args, &page); 74931ebf944SRussell King 75019e6e5e5SRabin Vincent if (page) { 75119e6e5e5SRabin Vincent unsigned long flags; 75219e6e5e5SRabin Vincent 7539eedd963SRussell King *handle = pfn_to_dma(dev, page_to_pfn(page)); 754b4268676SRabin Vincent buf->virt = args.want_vaddr ? addr : page; 75519e6e5e5SRabin Vincent 75619e6e5e5SRabin Vincent spin_lock_irqsave(&arm_dma_bufs_lock, flags); 75719e6e5e5SRabin Vincent list_add(&buf->list, &arm_dma_bufs); 75819e6e5e5SRabin Vincent spin_unlock_irqrestore(&arm_dma_bufs_lock, flags); 75919e6e5e5SRabin Vincent } else { 76019e6e5e5SRabin Vincent kfree(buf); 76119e6e5e5SRabin Vincent } 76231ebf944SRussell King 763b4268676SRabin Vincent return args.want_vaddr ? addr : page; 764ab6494f0SCatalin Marinas } 765695ae0afSRussell King 7660ddbccd1SRussell King /* 7670ddbccd1SRussell King * Allocate DMA-coherent memory space and return both the kernel remapped 7680ddbccd1SRussell King * virtual and bus address for that space. 
7690ddbccd1SRussell King */ 770f99d6034SMarek Szyprowski void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 77100085f1eSKrzysztof Kozlowski gfp_t gfp, unsigned long attrs) 7720ddbccd1SRussell King { 7730ea1ec71SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 7740ddbccd1SRussell King 775dd37e940SRob Herring return __dma_alloc(dev, size, handle, gfp, prot, false, 7766e8266e3SCarlo Caione attrs, __builtin_return_address(0)); 777dd37e940SRob Herring } 778dd37e940SRob Herring 779dd37e940SRob Herring static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 78000085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 781dd37e940SRob Herring { 78221caf3a7SLorenzo Nava return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, 7836e8266e3SCarlo Caione attrs, __builtin_return_address(0)); 7840ddbccd1SRussell King } 7850ddbccd1SRussell King 78655af8a91SMike Looijmans static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 787f99d6034SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 78800085f1eSKrzysztof Kozlowski unsigned long attrs) 7890ddbccd1SRussell King { 790c2a3831dSNathan Jones int ret = -ENXIO; 791a70c3ee3SFabio Estevam unsigned long nr_vma_pages = vma_pages(vma); 79250262a4bSMarek Szyprowski unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 793c7909509SMarek Szyprowski unsigned long pfn = dma_to_pfn(dev, dma_addr); 79450262a4bSMarek Szyprowski unsigned long off = vma->vm_pgoff; 79550262a4bSMarek Szyprowski 79643fc509cSVladimir Murzin if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 79747142f07SMarek Szyprowski return ret; 79847142f07SMarek Szyprowski 79950262a4bSMarek Szyprowski if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { 8000ddbccd1SRussell King ret = remap_pfn_range(vma, vma->vm_start, 80150262a4bSMarek Szyprowski pfn + off, 802c7909509SMarek Szyprowski vma->vm_end - vma->vm_start, 8030ddbccd1SRussell King 
vma->vm_page_prot); 80450262a4bSMarek Szyprowski } 8050ddbccd1SRussell King 8060ddbccd1SRussell King return ret; 8070ddbccd1SRussell King } 8080ddbccd1SRussell King 8090ddbccd1SRussell King /* 81055af8a91SMike Looijmans * Create userspace mapping for the DMA-coherent memory. 81155af8a91SMike Looijmans */ 81255af8a91SMike Looijmans static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, 81355af8a91SMike Looijmans void *cpu_addr, dma_addr_t dma_addr, size_t size, 81400085f1eSKrzysztof Kozlowski unsigned long attrs) 81555af8a91SMike Looijmans { 81655af8a91SMike Looijmans return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 81755af8a91SMike Looijmans } 81855af8a91SMike Looijmans 81955af8a91SMike Looijmans int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 82055af8a91SMike Looijmans void *cpu_addr, dma_addr_t dma_addr, size_t size, 82100085f1eSKrzysztof Kozlowski unsigned long attrs) 82255af8a91SMike Looijmans { 82355af8a91SMike Looijmans vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 82455af8a91SMike Looijmans return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 82555af8a91SMike Looijmans } 82655af8a91SMike Looijmans 82755af8a91SMike Looijmans /* 828c7909509SMarek Szyprowski * Free a buffer as defined by the above mapping. 
8290ddbccd1SRussell King */ 830dd37e940SRob Herring static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 83100085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs, 832dd37e940SRob Herring bool is_coherent) 8330ddbccd1SRussell King { 834c7909509SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); 83519e6e5e5SRabin Vincent struct arm_dma_buffer *buf; 836b4268676SRabin Vincent struct arm_dma_free_args args = { 837b4268676SRabin Vincent .dev = dev, 838b4268676SRabin Vincent .size = PAGE_ALIGN(size), 839b4268676SRabin Vincent .cpu_addr = cpu_addr, 840b4268676SRabin Vincent .page = page, 84100085f1eSKrzysztof Kozlowski .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), 842b4268676SRabin Vincent }; 84319e6e5e5SRabin Vincent 84419e6e5e5SRabin Vincent buf = arm_dma_buffer_find(cpu_addr); 84519e6e5e5SRabin Vincent if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr)) 84619e6e5e5SRabin Vincent return; 8470ddbccd1SRussell King 848b4268676SRabin Vincent buf->allocator->free(&args); 84919e6e5e5SRabin Vincent kfree(buf); 8500ddbccd1SRussell King } 851afd1a321SRussell King 852dd37e940SRob Herring void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 85300085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs) 854dd37e940SRob Herring { 855dd37e940SRob Herring __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); 856dd37e940SRob Herring } 857dd37e940SRob Herring 858dd37e940SRob Herring static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 85900085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs) 860dd37e940SRob Herring { 861dd37e940SRob Herring __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); 862dd37e940SRob Herring } 863dd37e940SRob Herring 864dc2832e1SMarek Szyprowski int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, 865dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t handle, size_t size, 86600085f1eSKrzysztof 
Kozlowski unsigned long attrs) 867dc2832e1SMarek Szyprowski { 868916a008bSRussell King unsigned long pfn = dma_to_pfn(dev, handle); 869916a008bSRussell King struct page *page; 870dc2832e1SMarek Szyprowski int ret; 871dc2832e1SMarek Szyprowski 872916a008bSRussell King /* If the PFN is not valid, we do not have a struct page */ 873916a008bSRussell King if (!pfn_valid(pfn)) 874916a008bSRussell King return -ENXIO; 875916a008bSRussell King 876916a008bSRussell King page = pfn_to_page(pfn); 877916a008bSRussell King 878dc2832e1SMarek Szyprowski ret = sg_alloc_table(sgt, 1, GFP_KERNEL); 879dc2832e1SMarek Szyprowski if (unlikely(ret)) 880dc2832e1SMarek Szyprowski return ret; 881dc2832e1SMarek Szyprowski 882dc2832e1SMarek Szyprowski sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); 883dc2832e1SMarek Szyprowski return 0; 884dc2832e1SMarek Szyprowski } 885dc2832e1SMarek Szyprowski 88665af191aSRussell King static void dma_cache_maint_page(struct page *page, unsigned long offset, 887a9c9147eSRussell King size_t size, enum dma_data_direction dir, 888a9c9147eSRussell King void (*op)(const void *, size_t, int)) 88965af191aSRussell King { 89015653371SRussell King unsigned long pfn; 89115653371SRussell King size_t left = size; 89215653371SRussell King 89315653371SRussell King pfn = page_to_pfn(page) + offset / PAGE_SIZE; 89415653371SRussell King offset %= PAGE_SIZE; 89515653371SRussell King 89665af191aSRussell King /* 89765af191aSRussell King * A single sg entry may refer to multiple physically contiguous 89865af191aSRussell King * pages. But we still need to process highmem pages individually. 89965af191aSRussell King * If highmem is not configured then the bulk of this loop gets 90065af191aSRussell King * optimized out. 
90165af191aSRussell King */ 90265af191aSRussell King do { 90365af191aSRussell King size_t len = left; 90493f1d629SRussell King void *vaddr; 90593f1d629SRussell King 90615653371SRussell King page = pfn_to_page(pfn); 90715653371SRussell King 90893f1d629SRussell King if (PageHighMem(page)) { 90915653371SRussell King if (len + offset > PAGE_SIZE) 91065af191aSRussell King len = PAGE_SIZE - offset; 911dd0f67f4SJoonsoo Kim 912dd0f67f4SJoonsoo Kim if (cache_is_vipt_nonaliasing()) { 91339af22a7SNicolas Pitre vaddr = kmap_atomic(page); 9147e5a69e8SNicolas Pitre op(vaddr + offset, len, dir); 91539af22a7SNicolas Pitre kunmap_atomic(vaddr); 916dd0f67f4SJoonsoo Kim } else { 917dd0f67f4SJoonsoo Kim vaddr = kmap_high_get(page); 918dd0f67f4SJoonsoo Kim if (vaddr) { 919dd0f67f4SJoonsoo Kim op(vaddr + offset, len, dir); 920dd0f67f4SJoonsoo Kim kunmap_high(page); 921dd0f67f4SJoonsoo Kim } 92293f1d629SRussell King } 92393f1d629SRussell King } else { 92493f1d629SRussell King vaddr = page_address(page) + offset; 925a9c9147eSRussell King op(vaddr, len, dir); 92693f1d629SRussell King } 92765af191aSRussell King offset = 0; 92815653371SRussell King pfn++; 92965af191aSRussell King left -= len; 93065af191aSRussell King } while (left); 93165af191aSRussell King } 93265af191aSRussell King 93351fde349SMarek Szyprowski /* 93451fde349SMarek Szyprowski * Make an area consistent for devices. 93551fde349SMarek Szyprowski * Note: Drivers should NOT use this function directly, as it will break 93651fde349SMarek Szyprowski * platforms with CONFIG_DMABOUNCE. 
93751fde349SMarek Szyprowski * Use the driver DMA support - see dma-mapping.h (dma_sync_*) 93851fde349SMarek Szyprowski */ 93951fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, 94065af191aSRussell King size_t size, enum dma_data_direction dir) 94165af191aSRussell King { 9422161c248SSantosh Shilimkar phys_addr_t paddr; 94343377453SNicolas Pitre 944a9c9147eSRussell King dma_cache_maint_page(page, off, size, dir, dmac_map_area); 94543377453SNicolas Pitre 94665af191aSRussell King paddr = page_to_phys(page) + off; 9472ffe2da3SRussell King if (dir == DMA_FROM_DEVICE) { 9482ffe2da3SRussell King outer_inv_range(paddr, paddr + size); 9492ffe2da3SRussell King } else { 9502ffe2da3SRussell King outer_clean_range(paddr, paddr + size); 9512ffe2da3SRussell King } 9522ffe2da3SRussell King /* FIXME: non-speculating: flush on bidirectional mappings? */ 95343377453SNicolas Pitre } 9544ea0d737SRussell King 95551fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, 9564ea0d737SRussell King size_t size, enum dma_data_direction dir) 9574ea0d737SRussell King { 9582161c248SSantosh Shilimkar phys_addr_t paddr = page_to_phys(page) + off; 9592ffe2da3SRussell King 9602ffe2da3SRussell King /* FIXME: non-speculating: not required */ 961deace4a6SRussell King /* in any case, don't bother invalidating if DMA to device */ 962deace4a6SRussell King if (dir != DMA_TO_DEVICE) { 9632ffe2da3SRussell King outer_inv_range(paddr, paddr + size); 9642ffe2da3SRussell King 965a9c9147eSRussell King dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); 966deace4a6SRussell King } 967c0177800SCatalin Marinas 968c0177800SCatalin Marinas /* 969b2a234edSMing Lei * Mark the D-cache clean for these pages to avoid extra flushing. 
970c0177800SCatalin Marinas */ 971b2a234edSMing Lei if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { 972b2a234edSMing Lei unsigned long pfn; 973b2a234edSMing Lei size_t left = size; 974b2a234edSMing Lei 975b2a234edSMing Lei pfn = page_to_pfn(page) + off / PAGE_SIZE; 976b2a234edSMing Lei off %= PAGE_SIZE; 977b2a234edSMing Lei if (off) { 978b2a234edSMing Lei pfn++; 979b2a234edSMing Lei left -= PAGE_SIZE - off; 980b2a234edSMing Lei } 981b2a234edSMing Lei while (left >= PAGE_SIZE) { 982b2a234edSMing Lei page = pfn_to_page(pfn++); 983c0177800SCatalin Marinas set_bit(PG_dcache_clean, &page->flags); 984b2a234edSMing Lei left -= PAGE_SIZE; 985b2a234edSMing Lei } 986b2a234edSMing Lei } 9874ea0d737SRussell King } 98843377453SNicolas Pitre 989afd1a321SRussell King /** 9902a550e73SMarek Szyprowski * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA 991afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 992afd1a321SRussell King * @sg: list of buffers 993afd1a321SRussell King * @nents: number of buffers to map 994afd1a321SRussell King * @dir: DMA transfer direction 995afd1a321SRussell King * 996afd1a321SRussell King * Map a set of buffers described by scatterlist in streaming mode for DMA. 997afd1a321SRussell King * This is the scatter-gather version of the dma_map_single interface. 998afd1a321SRussell King * Here the scatter gather list elements are each tagged with the 999afd1a321SRussell King * appropriate dma address and length. They are obtained via 1000afd1a321SRussell King * sg_dma_{address,length}. 1001afd1a321SRussell King * 1002afd1a321SRussell King * Device ownership issues as mentioned for dma_map_single are the same 1003afd1a321SRussell King * here. 
1004afd1a321SRussell King */ 10052dc6a016SMarek Szyprowski int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 100600085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs) 1007afd1a321SRussell King { 10085299709dSBart Van Assche const struct dma_map_ops *ops = get_dma_ops(dev); 1009afd1a321SRussell King struct scatterlist *s; 101001135d92SRussell King int i, j; 1011afd1a321SRussell King 1012afd1a321SRussell King for_each_sg(sg, s, nents, i) { 10134ce63fcdSMarek Szyprowski #ifdef CONFIG_NEED_SG_DMA_LENGTH 10144ce63fcdSMarek Szyprowski s->dma_length = s->length; 10154ce63fcdSMarek Szyprowski #endif 10162a550e73SMarek Szyprowski s->dma_address = ops->map_page(dev, sg_page(s), s->offset, 10172a550e73SMarek Szyprowski s->length, dir, attrs); 101801135d92SRussell King if (dma_mapping_error(dev, s->dma_address)) 101901135d92SRussell King goto bad_mapping; 1020afd1a321SRussell King } 1021afd1a321SRussell King return nents; 102201135d92SRussell King 102301135d92SRussell King bad_mapping: 102401135d92SRussell King for_each_sg(sg, s, i, j) 10252a550e73SMarek Szyprowski ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 102601135d92SRussell King return 0; 1027afd1a321SRussell King } 1028afd1a321SRussell King 1029afd1a321SRussell King /** 10302a550e73SMarek Szyprowski * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 1031afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1032afd1a321SRussell King * @sg: list of buffers 10330adfca6fSLinus Walleij * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 1034afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 1035afd1a321SRussell King * 1036afd1a321SRussell King * Unmap a set of streaming mode DMA translations. Again, CPU access 1037afd1a321SRussell King * rules concerning calls here are the same as for dma_unmap_single(). 
1038afd1a321SRussell King */ 10392dc6a016SMarek Szyprowski void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 104000085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs) 1041afd1a321SRussell King { 10425299709dSBart Van Assche const struct dma_map_ops *ops = get_dma_ops(dev); 104301135d92SRussell King struct scatterlist *s; 104401135d92SRussell King 104501135d92SRussell King int i; 104624056f52SRussell King 104701135d92SRussell King for_each_sg(sg, s, nents, i) 10482a550e73SMarek Szyprowski ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 1049afd1a321SRussell King } 1050afd1a321SRussell King 1051afd1a321SRussell King /** 10522a550e73SMarek Szyprowski * arm_dma_sync_sg_for_cpu 1053afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1054afd1a321SRussell King * @sg: list of buffers 1055afd1a321SRussell King * @nents: number of buffers to map (returned from dma_map_sg) 1056afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 1057afd1a321SRussell King */ 10582dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 1059afd1a321SRussell King int nents, enum dma_data_direction dir) 1060afd1a321SRussell King { 10615299709dSBart Van Assche const struct dma_map_ops *ops = get_dma_ops(dev); 1062afd1a321SRussell King struct scatterlist *s; 1063afd1a321SRussell King int i; 1064afd1a321SRussell King 10652a550e73SMarek Szyprowski for_each_sg(sg, s, nents, i) 10662a550e73SMarek Szyprowski ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, 10672a550e73SMarek Szyprowski dir); 1068afd1a321SRussell King } 106924056f52SRussell King 1070afd1a321SRussell King /** 10712a550e73SMarek Szyprowski * arm_dma_sync_sg_for_device 1072afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1073afd1a321SRussell King * @sg: list of buffers 1074afd1a321SRussell King 
* @nents: number of buffers to map (returned from dma_map_sg) 1075afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 1076afd1a321SRussell King */ 10772dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 1078afd1a321SRussell King int nents, enum dma_data_direction dir) 1079afd1a321SRussell King { 10805299709dSBart Van Assche const struct dma_map_ops *ops = get_dma_ops(dev); 1081afd1a321SRussell King struct scatterlist *s; 1082afd1a321SRussell King int i; 1083afd1a321SRussell King 10842a550e73SMarek Szyprowski for_each_sg(sg, s, nents, i) 10852a550e73SMarek Szyprowski ops->sync_single_for_device(dev, sg_dma_address(s), s->length, 10862a550e73SMarek Szyprowski dir); 1087afd1a321SRussell King } 108824056f52SRussell King 1089022ae537SRussell King /* 1090022ae537SRussell King * Return whether the given device DMA address mask can be supported 1091022ae537SRussell King * properly. For example, if your device can only drive the low 24-bits 1092022ae537SRussell King * during bus mastering, then you would pass 0x00ffffff as the mask 1093022ae537SRussell King * to this function. 1094022ae537SRussell King */ 1095418a7a7eSChristoph Hellwig int arm_dma_supported(struct device *dev, u64 mask) 1096022ae537SRussell King { 10979f28cde0SRussell King return __dma_supported(dev, mask, false); 1098022ae537SRussell King } 1099022ae537SRussell King 11001874619aSThierry Reding static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) 11011874619aSThierry Reding { 1102ad3c7b18SChristoph Hellwig /* 1103ad3c7b18SChristoph Hellwig * When CONFIG_ARM_LPAE is set, physical address can extend above 1104ad3c7b18SChristoph Hellwig * 32-bits, which then can't be addressed by devices that only support 1105ad3c7b18SChristoph Hellwig * 32-bit DMA. 
1106ad3c7b18SChristoph Hellwig * Use the generic dma-direct / swiotlb ops code in that case, as that 1107ad3c7b18SChristoph Hellwig * handles bounce buffering for us. 1108ad3c7b18SChristoph Hellwig * 1109ad3c7b18SChristoph Hellwig * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the 1110ad3c7b18SChristoph Hellwig * latter is also selected by the Xen code, but that code for now relies 1111ad3c7b18SChristoph Hellwig * on non-NULL dev_dma_ops. To be cleaned up later. 1112ad3c7b18SChristoph Hellwig */ 1113ad3c7b18SChristoph Hellwig if (IS_ENABLED(CONFIG_ARM_LPAE)) 1114ad3c7b18SChristoph Hellwig return NULL; 11151874619aSThierry Reding return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; 11161874619aSThierry Reding } 11171874619aSThierry Reding 11184ce63fcdSMarek Szyprowski #ifdef CONFIG_ARM_DMA_USE_IOMMU 11194ce63fcdSMarek Szyprowski 11207d2822dfSSricharan R static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) 11217d2822dfSSricharan R { 11227d2822dfSSricharan R int prot = 0; 11237d2822dfSSricharan R 11247d2822dfSSricharan R if (attrs & DMA_ATTR_PRIVILEGED) 11257d2822dfSSricharan R prot |= IOMMU_PRIV; 11267d2822dfSSricharan R 11277d2822dfSSricharan R switch (dir) { 11287d2822dfSSricharan R case DMA_BIDIRECTIONAL: 11297d2822dfSSricharan R return prot | IOMMU_READ | IOMMU_WRITE; 11307d2822dfSSricharan R case DMA_TO_DEVICE: 11317d2822dfSSricharan R return prot | IOMMU_READ; 11327d2822dfSSricharan R case DMA_FROM_DEVICE: 11337d2822dfSSricharan R return prot | IOMMU_WRITE; 11347d2822dfSSricharan R default: 11357d2822dfSSricharan R return prot; 11367d2822dfSSricharan R } 11377d2822dfSSricharan R } 11387d2822dfSSricharan R 11394ce63fcdSMarek Szyprowski /* IOMMU */ 11404ce63fcdSMarek Szyprowski 11414d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping); 11424d852ef8SAndreas Herrmann 11434ce63fcdSMarek Szyprowski static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, 
11444ce63fcdSMarek Szyprowski size_t size) 11454ce63fcdSMarek Szyprowski { 11464ce63fcdSMarek Szyprowski unsigned int order = get_order(size); 11474ce63fcdSMarek Szyprowski unsigned int align = 0; 11484ce63fcdSMarek Szyprowski unsigned int count, start; 1149006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 11504ce63fcdSMarek Szyprowski unsigned long flags; 11514d852ef8SAndreas Herrmann dma_addr_t iova; 11524d852ef8SAndreas Herrmann int i; 11534ce63fcdSMarek Szyprowski 115460460abfSSeung-Woo Kim if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT) 115560460abfSSeung-Woo Kim order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT; 115660460abfSSeung-Woo Kim 115768efd7d2SMarek Szyprowski count = PAGE_ALIGN(size) >> PAGE_SHIFT; 115868efd7d2SMarek Szyprowski align = (1 << order) - 1; 11594ce63fcdSMarek Szyprowski 11604ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 11614d852ef8SAndreas Herrmann for (i = 0; i < mapping->nr_bitmaps; i++) { 11624d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11634d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 11644d852ef8SAndreas Herrmann 11654d852ef8SAndreas Herrmann if (start > mapping->bits) 11664d852ef8SAndreas Herrmann continue; 11674d852ef8SAndreas Herrmann 11684d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 11694d852ef8SAndreas Herrmann break; 11704d852ef8SAndreas Herrmann } 11714d852ef8SAndreas Herrmann 11724d852ef8SAndreas Herrmann /* 11734d852ef8SAndreas Herrmann * No unused range found. Try to extend the existing mapping 11744d852ef8SAndreas Herrmann * and perform a second attempt to reserve an IO virtual 11754d852ef8SAndreas Herrmann * address range of size bytes. 
11764d852ef8SAndreas Herrmann */ 11774d852ef8SAndreas Herrmann if (i == mapping->nr_bitmaps) { 11784d852ef8SAndreas Herrmann if (extend_iommu_mapping(mapping)) { 11794d852ef8SAndreas Herrmann spin_unlock_irqrestore(&mapping->lock, flags); 118072fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 11814d852ef8SAndreas Herrmann } 11824d852ef8SAndreas Herrmann 11834d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11844d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 11854d852ef8SAndreas Herrmann 11864ce63fcdSMarek Szyprowski if (start > mapping->bits) { 11874ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 118872fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 11894ce63fcdSMarek Szyprowski } 11904ce63fcdSMarek Szyprowski 11914d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 11924d852ef8SAndreas Herrmann } 11934ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 11944ce63fcdSMarek Szyprowski 1195006f841dSRitesh Harjani iova = mapping->base + (mapping_size * i); 119668efd7d2SMarek Szyprowski iova += start << PAGE_SHIFT; 11974d852ef8SAndreas Herrmann 11984d852ef8SAndreas Herrmann return iova; 11994ce63fcdSMarek Szyprowski } 12004ce63fcdSMarek Szyprowski 12014ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping, 12024ce63fcdSMarek Szyprowski dma_addr_t addr, size_t size) 12034ce63fcdSMarek Szyprowski { 12044d852ef8SAndreas Herrmann unsigned int start, count; 1205006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 12064ce63fcdSMarek Szyprowski unsigned long flags; 12074d852ef8SAndreas Herrmann dma_addr_t bitmap_base; 12084d852ef8SAndreas Herrmann u32 bitmap_index; 12094d852ef8SAndreas Herrmann 12104d852ef8SAndreas Herrmann if (!size) 12114d852ef8SAndreas Herrmann return; 12124d852ef8SAndreas Herrmann 1213006f841dSRitesh Harjani bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; 
12144d852ef8SAndreas Herrmann BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); 12154d852ef8SAndreas Herrmann 1216006f841dSRitesh Harjani bitmap_base = mapping->base + mapping_size * bitmap_index; 12174d852ef8SAndreas Herrmann 121868efd7d2SMarek Szyprowski start = (addr - bitmap_base) >> PAGE_SHIFT; 12194d852ef8SAndreas Herrmann 1220006f841dSRitesh Harjani if (addr + size > bitmap_base + mapping_size) { 12214d852ef8SAndreas Herrmann /* 12224d852ef8SAndreas Herrmann * The address range to be freed reaches into the iova 12234d852ef8SAndreas Herrmann * range of the next bitmap. This should not happen as 12244d852ef8SAndreas Herrmann * we don't allow this in __alloc_iova (at the 12254d852ef8SAndreas Herrmann * moment). 12264d852ef8SAndreas Herrmann */ 12274d852ef8SAndreas Herrmann BUG(); 12284d852ef8SAndreas Herrmann } else 122968efd7d2SMarek Szyprowski count = size >> PAGE_SHIFT; 12304ce63fcdSMarek Szyprowski 12314ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 12324d852ef8SAndreas Herrmann bitmap_clear(mapping->bitmaps[bitmap_index], start, count); 12334ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 12344ce63fcdSMarek Szyprowski } 12354ce63fcdSMarek Szyprowski 123633298ef6SDoug Anderson /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! 
 */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

/*
 * Allocate the array of pages backing a buffer of @size bytes for an
 * IOMMU-mapped allocation.  With DMA_ATTR_FORCE_CONTIGUOUS the whole
 * buffer comes from CMA in one physically contiguous chunk; otherwise
 * pages are gathered opportunistically, walking iommu_order_array from
 * large orders down to single pages as allocation pressure is seen.
 * Returns a kzalloc'd/vzalloc'd page-pointer array, or NULL on failure.
 */
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	/* Small page arrays come from the slab; large ones from vmalloc. */
	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
	{
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order,
						 gfp & __GFP_NOWARN);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		/* Fill the array with consecutive pages of the CMA chunk. */
		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so himem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			/* Order 0 is the last resort; failure here is fatal. */
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			/* Split so each entry of the array is an order-0 page. */
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	/* Free only the entries populated so far, then the array itself. */
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

/*
 * Release a page array obtained from __iommu_alloc_buffer().  The attrs
 * must match the allocation: a DMA_ATTR_FORCE_CONTIGUOUS buffer goes back
 * to CMA as one chunk, otherwise each page is freed individually.
 */
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		/* Merge physically consecutive pages into one iommu_map call. */
		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	/* Unmap only what was mapped so far, then give the iova range back. */
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}

/*
 * Tear down an IO-address-space mapping made by __iommu_create_mapping().
 */
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

/*
 * For an address inside the atomic pool, recover the backing struct page.
 * Note the return is the page pointer itself cast to struct page ** to fit
 * the __iommu_get_pages() contract.
 */
static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

/*
 * Recover the page array for a cpu address, whichever allocator produced
 * it: atomic pool, no-kernel-mapping (cpu_addr is the array itself), or
 * the common DMA remapping bookkeeping.
 */
static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	return dma_common_find_pages(cpu_addr);
}

/*
 * Allocation path used when blocking is not allowed or the device is
 * coherent: take the buffer from a simple buffer (coherent) or the atomic
 * pool, then map it into the device's IO address space.
 */
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

/*
 * Inverse of __iommu_alloc_simple(): unmap from the IOMMU, then return the
 * memory to the matching allocator (simple buffer or atomic pool).
 */
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}

/*
 * Common implementation behind arm_iommu_alloc_attrs() and its coherent
 * variant: allocate a buffer, map it for the device, and (unless
 * DMA_ATTR_NO_KERNEL_MAPPING) give it a kernel virtual mapping.
 */
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
	    int coherent_flag)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them. The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	/* Caller asked for no kernel mapping: hand back the page array. */
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				      __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

/* Non-coherent entry point: allocate and map via the NORMAL path. */
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}

/* Coherent entry point: allocate and map via the COHERENT path. */
static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}

/*
 * Map the pages backing an IOMMU allocation into a userspace vma.
 * vma->vm_pgoff selects the starting page; fails with -ENXIO if the
 * buffer has no page array or the offset is out of range.
 */
static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int err;

	if (!pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages)
		return -ENXIO;

	err = vm_map_pages(vma, pages, nr_pages);
	if (err)
		pr_err("Remapping memory failed: %d\n", err);

	return err;
}
/* Non-coherent mmap: apply the DMA pgprot before mapping. */
static int arm_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/* Coherent mmap: the vma's existing page protection is kept as-is. */
static int arm_coherent_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	/* Atomic-pool and coherent buffers take the simple free path. */
	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	/* Drop the kernel virtual mapping, if one was created at alloc time. */
	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		dma_common_free_remap(cpu_addr, size);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

/* Non-coherent free, matching arm_iommu_alloc_attrs(). */
void arm_iommu_free_attrs(struct device *dev, size_t size,
		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}

/* Coherent free, matching arm_coherent_iommu_alloc_attrs(). */
void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}

/*
 * Build an sg_table describing the pages of an IOMMU allocation, for
 * dma_get_sgtable().  Fails with -ENXIO if no page array can be found.
 */
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_MAPPING_ERROR;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_MAPPING_ERROR)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		/* Maintain cache coherency for the CPU-owned data if needed. */
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	/* Unmap the partially-built range and return the iova space. */
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

/*
 * Common sg-mapping implementation: walk the scatterlist, merging entries
 * into chunks that stay page-aligned and within the device's max segment
 * size, and map each chunk contiguously in IO address space.  Returns the
 * number of mapped segments, or 0 on failure (after unwinding).
 */
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, unsigned long attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_MAPPING_ERROR;
		s->dma_length = 0;

		/* Flush the current chunk when this entry can't be merged. */
		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	/* Map the final chunk. */
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

/*
 * Common sg-unmapping implementation: remove each mapped segment from the
 * IOMMU and, for non-coherent devices, hand cache ownership back to the CPU.
 */
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs, bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}


/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	/* The handle carries the intra-page offset of the buffer. */
	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	/* Hand cache ownership to the device, then map as for coherent. */
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	/* Give cache ownership back to the CPU before tearing down. */
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot;
	phys_addr_t addr = phys_addr & PAGE_MASK;
	unsigned int offset = phys_addr & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	/* MMIO resources are mapped with the IOMMU_MMIO attribute. */
	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	if (ret < 0)
196224ed5d2cSNiklas Söderlund goto fail; 196324ed5d2cSNiklas Söderlund 196424ed5d2cSNiklas Söderlund return dma_addr + offset; 196524ed5d2cSNiklas Söderlund fail: 196624ed5d2cSNiklas Söderlund __free_iova(mapping, dma_addr, len); 196772fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 196824ed5d2cSNiklas Söderlund } 196924ed5d2cSNiklas Söderlund 197024ed5d2cSNiklas Söderlund /** 197124ed5d2cSNiklas Söderlund * arm_iommu_unmap_resource - unmap a device DMA resource 197224ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 197324ed5d2cSNiklas Söderlund * @dma_handle: DMA address to resource 197424ed5d2cSNiklas Söderlund * @size: size of resource to map 197524ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 197624ed5d2cSNiklas Söderlund */ 197724ed5d2cSNiklas Söderlund static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle, 197824ed5d2cSNiklas Söderlund size_t size, enum dma_data_direction dir, 197924ed5d2cSNiklas Söderlund unsigned long attrs) 198024ed5d2cSNiklas Söderlund { 198124ed5d2cSNiklas Söderlund struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 198224ed5d2cSNiklas Söderlund dma_addr_t iova = dma_handle & PAGE_MASK; 198324ed5d2cSNiklas Söderlund unsigned int offset = dma_handle & ~PAGE_MASK; 198424ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 198524ed5d2cSNiklas Söderlund 198624ed5d2cSNiklas Söderlund if (!iova) 198724ed5d2cSNiklas Söderlund return; 198824ed5d2cSNiklas Söderlund 198924ed5d2cSNiklas Söderlund iommu_unmap(mapping->domain, iova, len); 199024ed5d2cSNiklas Söderlund __free_iova(mapping, iova, len); 199124ed5d2cSNiklas Söderlund } 199224ed5d2cSNiklas Söderlund 19934ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 19944ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 19954ce63fcdSMarek Szyprowski { 199689cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19974ce63fcdSMarek 
Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19984ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19994ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 20004ce63fcdSMarek Szyprowski 20014ce63fcdSMarek Szyprowski if (!iova) 20024ce63fcdSMarek Szyprowski return; 20034ce63fcdSMarek Szyprowski 20044ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 20054ce63fcdSMarek Szyprowski } 20064ce63fcdSMarek Szyprowski 20074ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 20084ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 20094ce63fcdSMarek Szyprowski { 201089cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 20114ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 20124ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 20134ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 20144ce63fcdSMarek Szyprowski 20154ce63fcdSMarek Szyprowski if (!iova) 20164ce63fcdSMarek Szyprowski return; 20174ce63fcdSMarek Szyprowski 20184ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 20194ce63fcdSMarek Szyprowski } 20204ce63fcdSMarek Szyprowski 20215299709dSBart Van Assche const struct dma_map_ops iommu_ops = { 20224ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 20234ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 20244ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 2025dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 20264ce63fcdSMarek Szyprowski 20274ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 20284ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 20294ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 20304ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device, 
20314ce63fcdSMarek Szyprowski 20324ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 20334ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 20344ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 20354ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 203624ed5d2cSNiklas Söderlund 203724ed5d2cSNiklas Söderlund .map_resource = arm_iommu_map_resource, 203824ed5d2cSNiklas Söderlund .unmap_resource = arm_iommu_unmap_resource, 20399eef8b8cSChristoph Hellwig 2040418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 20414ce63fcdSMarek Szyprowski }; 20424ce63fcdSMarek Szyprowski 20435299709dSBart Van Assche const struct dma_map_ops iommu_coherent_ops = { 204456506822SGregory CLEMENT .alloc = arm_coherent_iommu_alloc_attrs, 204556506822SGregory CLEMENT .free = arm_coherent_iommu_free_attrs, 204656506822SGregory CLEMENT .mmap = arm_coherent_iommu_mmap_attrs, 20470fa478dfSRob Herring .get_sgtable = arm_iommu_get_sgtable, 20480fa478dfSRob Herring 20490fa478dfSRob Herring .map_page = arm_coherent_iommu_map_page, 20500fa478dfSRob Herring .unmap_page = arm_coherent_iommu_unmap_page, 20510fa478dfSRob Herring 20520fa478dfSRob Herring .map_sg = arm_coherent_iommu_map_sg, 20530fa478dfSRob Herring .unmap_sg = arm_coherent_iommu_unmap_sg, 205424ed5d2cSNiklas Söderlund 205524ed5d2cSNiklas Söderlund .map_resource = arm_iommu_map_resource, 205624ed5d2cSNiklas Söderlund .unmap_resource = arm_iommu_unmap_resource, 20579eef8b8cSChristoph Hellwig 2058418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 20590fa478dfSRob Herring }; 20600fa478dfSRob Herring 20614ce63fcdSMarek Szyprowski /** 20624ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 20634ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 20644ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 206568efd7d2SMarek Szyprowski * @size: maximum size of the valid IO address space 
20664ce63fcdSMarek Szyprowski * 20674ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 20684ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 20694ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 20704ce63fcdSMarek Szyprowski * 20714ce63fcdSMarek Szyprowski * The client device need to be attached to the mapping with 20724ce63fcdSMarek Szyprowski * arm_iommu_attach_device function. 20734ce63fcdSMarek Szyprowski */ 20744ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 20751424532bSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) 20764ce63fcdSMarek Szyprowski { 207768efd7d2SMarek Szyprowski unsigned int bits = size >> PAGE_SHIFT; 207868efd7d2SMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 20794ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 208068efd7d2SMarek Szyprowski int extensions = 1; 20814ce63fcdSMarek Szyprowski int err = -ENOMEM; 20824ce63fcdSMarek Szyprowski 20831424532bSMarek Szyprowski /* currently only 32-bit DMA address space is supported */ 20841424532bSMarek Szyprowski if (size > DMA_BIT_MASK(32) + 1) 20851424532bSMarek Szyprowski return ERR_PTR(-ERANGE); 20861424532bSMarek Szyprowski 208768efd7d2SMarek Szyprowski if (!bitmap_size) 20884ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 20894ce63fcdSMarek Szyprowski 209068efd7d2SMarek Szyprowski if (bitmap_size > PAGE_SIZE) { 209168efd7d2SMarek Szyprowski extensions = bitmap_size / PAGE_SIZE; 209268efd7d2SMarek Szyprowski bitmap_size = PAGE_SIZE; 209368efd7d2SMarek Szyprowski } 209468efd7d2SMarek Szyprowski 20954ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 20964ce63fcdSMarek Szyprowski if (!mapping) 20974ce63fcdSMarek Szyprowski goto err; 20984ce63fcdSMarek Szyprowski 209968efd7d2SMarek Szyprowski mapping->bitmap_size = bitmap_size; 21006396bb22SKees Cook 
mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), 21014d852ef8SAndreas Herrmann GFP_KERNEL); 21024d852ef8SAndreas Herrmann if (!mapping->bitmaps) 21034ce63fcdSMarek Szyprowski goto err2; 21044ce63fcdSMarek Szyprowski 210568efd7d2SMarek Szyprowski mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); 21064d852ef8SAndreas Herrmann if (!mapping->bitmaps[0]) 21074d852ef8SAndreas Herrmann goto err3; 21084d852ef8SAndreas Herrmann 21094d852ef8SAndreas Herrmann mapping->nr_bitmaps = 1; 21104d852ef8SAndreas Herrmann mapping->extensions = extensions; 21114ce63fcdSMarek Szyprowski mapping->base = base; 211268efd7d2SMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 21134d852ef8SAndreas Herrmann 21144ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 21154ce63fcdSMarek Szyprowski 21164ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 21174ce63fcdSMarek Szyprowski if (!mapping->domain) 21184d852ef8SAndreas Herrmann goto err4; 21194ce63fcdSMarek Szyprowski 21204ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 21214ce63fcdSMarek Szyprowski return mapping; 21224d852ef8SAndreas Herrmann err4: 21234d852ef8SAndreas Herrmann kfree(mapping->bitmaps[0]); 21244ce63fcdSMarek Szyprowski err3: 21254d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 21264ce63fcdSMarek Szyprowski err2: 21274ce63fcdSMarek Szyprowski kfree(mapping); 21284ce63fcdSMarek Szyprowski err: 21294ce63fcdSMarek Szyprowski return ERR_PTR(err); 21304ce63fcdSMarek Szyprowski } 213118177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 21324ce63fcdSMarek Szyprowski 21334ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 21344ce63fcdSMarek Szyprowski { 21354d852ef8SAndreas Herrmann int i; 21364ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 21374ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 21384ce63fcdSMarek Szyprowski 21394ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 
21404d852ef8SAndreas Herrmann for (i = 0; i < mapping->nr_bitmaps; i++) 21414d852ef8SAndreas Herrmann kfree(mapping->bitmaps[i]); 21424d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 21434ce63fcdSMarek Szyprowski kfree(mapping); 21444ce63fcdSMarek Szyprowski } 21454ce63fcdSMarek Szyprowski 21464d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping) 21474d852ef8SAndreas Herrmann { 21484d852ef8SAndreas Herrmann int next_bitmap; 21494d852ef8SAndreas Herrmann 2150462859aaSMarek Szyprowski if (mapping->nr_bitmaps >= mapping->extensions) 21514d852ef8SAndreas Herrmann return -EINVAL; 21524d852ef8SAndreas Herrmann 21534d852ef8SAndreas Herrmann next_bitmap = mapping->nr_bitmaps; 21544d852ef8SAndreas Herrmann mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, 21554d852ef8SAndreas Herrmann GFP_ATOMIC); 21564d852ef8SAndreas Herrmann if (!mapping->bitmaps[next_bitmap]) 21574d852ef8SAndreas Herrmann return -ENOMEM; 21584d852ef8SAndreas Herrmann 21594d852ef8SAndreas Herrmann mapping->nr_bitmaps++; 21604d852ef8SAndreas Herrmann 21614d852ef8SAndreas Herrmann return 0; 21624d852ef8SAndreas Herrmann } 21634d852ef8SAndreas Herrmann 21644ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 21654ce63fcdSMarek Szyprowski { 21664ce63fcdSMarek Szyprowski if (mapping) 21674ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping); 21684ce63fcdSMarek Szyprowski } 216918177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 21704ce63fcdSMarek Szyprowski 2171eab8d653SLaurent Pinchart static int __arm_iommu_attach_device(struct device *dev, 21724ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping) 21734ce63fcdSMarek Szyprowski { 21744ce63fcdSMarek Szyprowski int err; 21754ce63fcdSMarek Szyprowski 21764ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev); 21774ce63fcdSMarek Szyprowski if (err) 21784ce63fcdSMarek Szyprowski return err; 21794ce63fcdSMarek 
Szyprowski 21804ce63fcdSMarek Szyprowski kref_get(&mapping->kref); 218189cfdb19SWill Deacon to_dma_iommu_mapping(dev) = mapping; 21824ce63fcdSMarek Szyprowski 218375c59716SHiroshi Doyu pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 21844ce63fcdSMarek Szyprowski return 0; 21854ce63fcdSMarek Szyprowski } 21864ce63fcdSMarek Szyprowski 21876fe36758SHiroshi Doyu /** 2188eab8d653SLaurent Pinchart * arm_iommu_attach_device 21896fe36758SHiroshi Doyu * @dev: valid struct device pointer 2190eab8d653SLaurent Pinchart * @mapping: io address space mapping structure (returned from 2191eab8d653SLaurent Pinchart * arm_iommu_create_mapping) 21926fe36758SHiroshi Doyu * 2193eab8d653SLaurent Pinchart * Attaches specified io address space mapping to the provided device. 2194eab8d653SLaurent Pinchart * This replaces the dma operations (dma_map_ops pointer) with the 2195eab8d653SLaurent Pinchart * IOMMU aware version. 2196eab8d653SLaurent Pinchart * 2197eab8d653SLaurent Pinchart * More than one client might be attached to the same io address space 2198eab8d653SLaurent Pinchart * mapping. 
21996fe36758SHiroshi Doyu */ 2200eab8d653SLaurent Pinchart int arm_iommu_attach_device(struct device *dev, 2201eab8d653SLaurent Pinchart struct dma_iommu_mapping *mapping) 2202eab8d653SLaurent Pinchart { 2203eab8d653SLaurent Pinchart int err; 2204eab8d653SLaurent Pinchart 2205eab8d653SLaurent Pinchart err = __arm_iommu_attach_device(dev, mapping); 2206eab8d653SLaurent Pinchart if (err) 2207eab8d653SLaurent Pinchart return err; 2208eab8d653SLaurent Pinchart 2209eab8d653SLaurent Pinchart set_dma_ops(dev, &iommu_ops); 2210eab8d653SLaurent Pinchart return 0; 2211eab8d653SLaurent Pinchart } 2212eab8d653SLaurent Pinchart EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 2213eab8d653SLaurent Pinchart 2214d3e01c51SSricharan R /** 2215d3e01c51SSricharan R * arm_iommu_detach_device 2216d3e01c51SSricharan R * @dev: valid struct device pointer 2217d3e01c51SSricharan R * 2218d3e01c51SSricharan R * Detaches the provided device from a previously attached map. 22194a4d68fcSWolfram Sang (Renesas) * This overwrites the dma_ops pointer with appropriate non-IOMMU ops. 
2220d3e01c51SSricharan R */ 2221d3e01c51SSricharan R void arm_iommu_detach_device(struct device *dev) 22226fe36758SHiroshi Doyu { 22236fe36758SHiroshi Doyu struct dma_iommu_mapping *mapping; 22246fe36758SHiroshi Doyu 22256fe36758SHiroshi Doyu mapping = to_dma_iommu_mapping(dev); 22266fe36758SHiroshi Doyu if (!mapping) { 22276fe36758SHiroshi Doyu dev_warn(dev, "Not attached\n"); 22286fe36758SHiroshi Doyu return; 22296fe36758SHiroshi Doyu } 22306fe36758SHiroshi Doyu 22316fe36758SHiroshi Doyu iommu_detach_device(mapping->domain, dev); 22326fe36758SHiroshi Doyu kref_put(&mapping->kref, release_iommu_mapping); 223389cfdb19SWill Deacon to_dma_iommu_mapping(dev) = NULL; 22341874619aSThierry Reding set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent)); 22356fe36758SHiroshi Doyu 22366fe36758SHiroshi Doyu pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 22376fe36758SHiroshi Doyu } 223818177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 22396fe36758SHiroshi Doyu 22405299709dSBart Van Assche static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 22414bb25789SWill Deacon { 22424bb25789SWill Deacon return coherent ? 
&iommu_coherent_ops : &iommu_ops; 22434bb25789SWill Deacon } 22444bb25789SWill Deacon 22454bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, 224653c92d79SRobin Murphy const struct iommu_ops *iommu) 22474bb25789SWill Deacon { 22484bb25789SWill Deacon struct dma_iommu_mapping *mapping; 22494bb25789SWill Deacon 22504bb25789SWill Deacon if (!iommu) 22514bb25789SWill Deacon return false; 22524bb25789SWill Deacon 22534bb25789SWill Deacon mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); 22544bb25789SWill Deacon if (IS_ERR(mapping)) { 22554bb25789SWill Deacon pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", 22564bb25789SWill Deacon size, dev_name(dev)); 22574bb25789SWill Deacon return false; 22584bb25789SWill Deacon } 22594bb25789SWill Deacon 2260eab8d653SLaurent Pinchart if (__arm_iommu_attach_device(dev, mapping)) { 22614bb25789SWill Deacon pr_warn("Failed to attached device %s to IOMMU_mapping\n", 22624bb25789SWill Deacon dev_name(dev)); 22634bb25789SWill Deacon arm_iommu_release_mapping(mapping); 22644bb25789SWill Deacon return false; 22654bb25789SWill Deacon } 22664bb25789SWill Deacon 22674bb25789SWill Deacon return true; 22684bb25789SWill Deacon } 22694bb25789SWill Deacon 22704bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) 22714bb25789SWill Deacon { 227289cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 22734bb25789SWill Deacon 2274c2273a18SWill Deacon if (!mapping) 2275c2273a18SWill Deacon return; 2276c2273a18SWill Deacon 2277d3e01c51SSricharan R arm_iommu_detach_device(dev); 22784bb25789SWill Deacon arm_iommu_release_mapping(mapping); 22794bb25789SWill Deacon } 22804bb25789SWill Deacon 22814bb25789SWill Deacon #else 22824bb25789SWill Deacon 22834bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, 228453c92d79SRobin Murphy const struct iommu_ops *iommu) 22854bb25789SWill 
Deacon { 22864bb25789SWill Deacon return false; 22874bb25789SWill Deacon } 22884bb25789SWill Deacon 22894bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { } 22904bb25789SWill Deacon 22914bb25789SWill Deacon #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops 22924bb25789SWill Deacon 22934bb25789SWill Deacon #endif /* CONFIG_ARM_DMA_USE_IOMMU */ 22944bb25789SWill Deacon 22954bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 229653c92d79SRobin Murphy const struct iommu_ops *iommu, bool coherent) 22974bb25789SWill Deacon { 22985299709dSBart Van Assche const struct dma_map_ops *dma_ops; 22994bb25789SWill Deacon 23006f51ee70SLinus Torvalds dev->archdata.dma_coherent = coherent; 2301ad3c7b18SChristoph Hellwig #ifdef CONFIG_SWIOTLB 2302ad3c7b18SChristoph Hellwig dev->dma_coherent = coherent; 2303ad3c7b18SChristoph Hellwig #endif 230426b37b94SLaurent Pinchart 230526b37b94SLaurent Pinchart /* 230626b37b94SLaurent Pinchart * Don't override the dma_ops if they have already been set. Ideally 230726b37b94SLaurent Pinchart * this should be the only location where dma_ops are set, remove this 230826b37b94SLaurent Pinchart * check when all other callers of set_dma_ops will have disappeared. 
230926b37b94SLaurent Pinchart */ 231026b37b94SLaurent Pinchart if (dev->dma_ops) 231126b37b94SLaurent Pinchart return; 231226b37b94SLaurent Pinchart 23134bb25789SWill Deacon if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) 23144bb25789SWill Deacon dma_ops = arm_get_iommu_dma_map_ops(coherent); 23154bb25789SWill Deacon else 23164bb25789SWill Deacon dma_ops = arm_get_dma_map_ops(coherent); 23174bb25789SWill Deacon 23184bb25789SWill Deacon set_dma_ops(dev, dma_ops); 2319e0586326SStefano Stabellini 2320e0586326SStefano Stabellini #ifdef CONFIG_XEN 2321e0586326SStefano Stabellini if (xen_initial_domain()) { 2322e0586326SStefano Stabellini dev->archdata.dev_dma_ops = dev->dma_ops; 2323e0586326SStefano Stabellini dev->dma_ops = xen_dma_ops; 2324e0586326SStefano Stabellini } 2325e0586326SStefano Stabellini #endif 2326a93a121aSLaurent Pinchart dev->archdata.dma_ops_setup = true; 23274bb25789SWill Deacon } 23284bb25789SWill Deacon 23294bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev) 23304bb25789SWill Deacon { 2331a93a121aSLaurent Pinchart if (!dev->archdata.dma_ops_setup) 2332a93a121aSLaurent Pinchart return; 2333a93a121aSLaurent Pinchart 23344bb25789SWill Deacon arm_teardown_iommu_dma_ops(dev); 2335fc67e6f1SRobin Murphy /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ 2336fc67e6f1SRobin Murphy set_dma_ops(dev, NULL); 23374bb25789SWill Deacon } 2338ad3c7b18SChristoph Hellwig 2339ad3c7b18SChristoph Hellwig #ifdef CONFIG_SWIOTLB 2340ad3c7b18SChristoph Hellwig void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, 2341ad3c7b18SChristoph Hellwig size_t size, enum dma_data_direction dir) 2342ad3c7b18SChristoph Hellwig { 2343ad3c7b18SChristoph Hellwig __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), 2344ad3c7b18SChristoph Hellwig size, dir); 2345ad3c7b18SChristoph Hellwig } 2346ad3c7b18SChristoph Hellwig 2347ad3c7b18SChristoph Hellwig void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, 
2348ad3c7b18SChristoph Hellwig size_t size, enum dma_data_direction dir) 2349ad3c7b18SChristoph Hellwig { 2350ad3c7b18SChristoph Hellwig __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), 2351ad3c7b18SChristoph Hellwig size, dir); 2352ad3c7b18SChristoph Hellwig } 2353ad3c7b18SChristoph Hellwig 2354ad3c7b18SChristoph Hellwig long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, 2355ad3c7b18SChristoph Hellwig dma_addr_t dma_addr) 2356ad3c7b18SChristoph Hellwig { 2357ad3c7b18SChristoph Hellwig return dma_to_pfn(dev, dma_addr); 2358ad3c7b18SChristoph Hellwig } 2359ad3c7b18SChristoph Hellwig 2360ad3c7b18SChristoph Hellwig void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 2361ad3c7b18SChristoph Hellwig gfp_t gfp, unsigned long attrs) 2362ad3c7b18SChristoph Hellwig { 2363ad3c7b18SChristoph Hellwig return __dma_alloc(dev, size, dma_handle, gfp, 2364ad3c7b18SChristoph Hellwig __get_dma_pgprot(attrs, PAGE_KERNEL), false, 2365ad3c7b18SChristoph Hellwig attrs, __builtin_return_address(0)); 2366ad3c7b18SChristoph Hellwig } 2367ad3c7b18SChristoph Hellwig 2368ad3c7b18SChristoph Hellwig void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, 2369ad3c7b18SChristoph Hellwig dma_addr_t dma_handle, unsigned long attrs) 2370ad3c7b18SChristoph Hellwig { 2371ad3c7b18SChristoph Hellwig __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false); 2372ad3c7b18SChristoph Hellwig } 2373ad3c7b18SChristoph Hellwig #endif /* CONFIG_SWIOTLB */ 2374