// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL	    0
#define COHERENT    1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);
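/*
 * Look up (and unlink) the bookkeeping entry that __dma_alloc() recorded
 * for a given kernel virtual address, so the matching allocator's free
 * callback can later be invoked from __arm_dma_free().
 */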
static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.map_resource		= dma_direct_map_resource,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.map_resource		= dma_direct_map_resource,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	return dma_common_contiguous_remap(page, size, prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true, NORMAL,
					      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}
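/*
 * Grab memory from the preallocated atomic pool set up in atomic_pool_init().
 * This path is used for non-coherent allocations made from contexts that
 * cannot sleep, where remapping or CMA allocation is not possible.
 */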
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		__dma_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	unsigned long pfn = dma_to_pfn(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
	if (!pfn_valid(pfn))
		return -ENXIO;

	page = pfn_to_page(pfn);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
1106022ae537SRussell King */ 1107418a7a7eSChristoph Hellwig int arm_dma_supported(struct device *dev, u64 mask) 1108022ae537SRussell King { 11099f28cde0SRussell King return __dma_supported(dev, mask, false); 1110022ae537SRussell King } 1111022ae537SRussell King 11121874619aSThierry Reding static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) 11131874619aSThierry Reding { 1114ad3c7b18SChristoph Hellwig /* 1115ad3c7b18SChristoph Hellwig * When CONFIG_ARM_LPAE is set, physical address can extend above 1116ad3c7b18SChristoph Hellwig * 32-bits, which then can't be addressed by devices that only support 1117ad3c7b18SChristoph Hellwig * 32-bit DMA. 1118ad3c7b18SChristoph Hellwig * Use the generic dma-direct / swiotlb ops code in that case, as that 1119ad3c7b18SChristoph Hellwig * handles bounce buffering for us. 1120ad3c7b18SChristoph Hellwig * 1121ad3c7b18SChristoph Hellwig * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the 1122ad3c7b18SChristoph Hellwig * latter is also selected by the Xen code, but that code for now relies 1123ad3c7b18SChristoph Hellwig * on non-NULL dev_dma_ops. To be cleaned up later. 1124ad3c7b18SChristoph Hellwig */ 1125ad3c7b18SChristoph Hellwig if (IS_ENABLED(CONFIG_ARM_LPAE)) 1126ad3c7b18SChristoph Hellwig return NULL; 11271874619aSThierry Reding return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; 11281874619aSThierry Reding } 11291874619aSThierry Reding 11304ce63fcdSMarek Szyprowski #ifdef CONFIG_ARM_DMA_USE_IOMMU 11314ce63fcdSMarek Szyprowski 11327d2822dfSSricharan R static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) 11337d2822dfSSricharan R { 11347d2822dfSSricharan R int prot = 0; 11357d2822dfSSricharan R 11367d2822dfSSricharan R if (attrs & DMA_ATTR_PRIVILEGED) 11377d2822dfSSricharan R prot |= IOMMU_PRIV; 11387d2822dfSSricharan R 11397d2822dfSSricharan R switch (dir) { 11407d2822dfSSricharan R case DMA_BIDIRECTIONAL: 11417d2822dfSSricharan R return prot | IOMMU_READ | IOMMU_WRITE; 11427d2822dfSSricharan R case DMA_TO_DEVICE: 11437d2822dfSSricharan R return prot | IOMMU_READ; 11447d2822dfSSricharan R case DMA_FROM_DEVICE: 11457d2822dfSSricharan R return prot | IOMMU_WRITE; 11467d2822dfSSricharan R default: 11477d2822dfSSricharan R return prot; 11487d2822dfSSricharan R } 11497d2822dfSSricharan R } 11507d2822dfSSricharan R 11514ce63fcdSMarek Szyprowski /* IOMMU */ 11524ce63fcdSMarek Szyprowski 11534d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping); 11544d852ef8SAndreas Herrmann 11554ce63fcdSMarek Szyprowski static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, 11564ce63fcdSMarek Szyprowski size_t size) 11574ce63fcdSMarek Szyprowski { 11584ce63fcdSMarek Szyprowski unsigned int order = get_order(size); 11594ce63fcdSMarek Szyprowski unsigned int align = 0; 11604ce63fcdSMarek Szyprowski unsigned int count, start; 1161006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 11624ce63fcdSMarek Szyprowski unsigned long flags; 11634d852ef8SAndreas Herrmann dma_addr_t iova; 11644d852ef8SAndreas Herrmann int i; 11654ce63fcdSMarek Szyprowski 116660460abfSSeung-Woo Kim if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT) 116760460abfSSeung-Woo Kim order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT; 116860460abfSSeung-Woo Kim 116968efd7d2SMarek Szyprowski count = PAGE_ALIGN(size) >> PAGE_SHIFT; 117068efd7d2SMarek Szyprowski align = (1 << order) - 1; 11714ce63fcdSMarek Szyprowski 11724ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 
11734d852ef8SAndreas Herrmann for (i = 0; i < mapping->nr_bitmaps; i++) { 11744d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11754d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 11764d852ef8SAndreas Herrmann 11774d852ef8SAndreas Herrmann if (start > mapping->bits) 11784d852ef8SAndreas Herrmann continue; 11794d852ef8SAndreas Herrmann 11804d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 11814d852ef8SAndreas Herrmann break; 11824d852ef8SAndreas Herrmann } 11834d852ef8SAndreas Herrmann 11844d852ef8SAndreas Herrmann /* 11854d852ef8SAndreas Herrmann * No unused range found. Try to extend the existing mapping 11864d852ef8SAndreas Herrmann * and perform a second attempt to reserve an IO virtual 11874d852ef8SAndreas Herrmann * address range of size bytes. 11884d852ef8SAndreas Herrmann */ 11894d852ef8SAndreas Herrmann if (i == mapping->nr_bitmaps) { 11904d852ef8SAndreas Herrmann if (extend_iommu_mapping(mapping)) { 11914d852ef8SAndreas Herrmann spin_unlock_irqrestore(&mapping->lock, flags); 119272fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 11934d852ef8SAndreas Herrmann } 11944d852ef8SAndreas Herrmann 11954d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11964d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 11974d852ef8SAndreas Herrmann 11984ce63fcdSMarek Szyprowski if (start > mapping->bits) { 11994ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 120072fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 12014ce63fcdSMarek Szyprowski } 12024ce63fcdSMarek Szyprowski 12034d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 12044d852ef8SAndreas Herrmann } 12054ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 12064ce63fcdSMarek Szyprowski 1207006f841dSRitesh Harjani iova = mapping->base + (mapping_size * i); 120868efd7d2SMarek Szyprowski iova += start << PAGE_SHIFT; 12094d852ef8SAndreas Herrmann 12104d852ef8SAndreas Herrmann return iova; 12114ce63fcdSMarek Szyprowski } 12124ce63fcdSMarek Szyprowski 12134ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping, 12144ce63fcdSMarek Szyprowski dma_addr_t addr, size_t size) 12154ce63fcdSMarek Szyprowski { 12164d852ef8SAndreas Herrmann unsigned int start, count; 1217006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 12184ce63fcdSMarek Szyprowski unsigned long flags; 12194d852ef8SAndreas Herrmann dma_addr_t bitmap_base; 12204d852ef8SAndreas Herrmann u32 bitmap_index; 12214d852ef8SAndreas Herrmann 12224d852ef8SAndreas Herrmann if (!size) 12234d852ef8SAndreas Herrmann return; 12244d852ef8SAndreas Herrmann 1225006f841dSRitesh Harjani bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; 12264d852ef8SAndreas Herrmann BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); 12274d852ef8SAndreas Herrmann 1228006f841dSRitesh Harjani bitmap_base = mapping->base + mapping_size * bitmap_index; 12294d852ef8SAndreas Herrmann 123068efd7d2SMarek Szyprowski start = (addr - bitmap_base) >> PAGE_SHIFT; 12314d852ef8SAndreas Herrmann 1232006f841dSRitesh Harjani if (addr + size > bitmap_base + mapping_size) { 12334d852ef8SAndreas Herrmann /* 12344d852ef8SAndreas Herrmann * The address range to be freed reaches into the iova 12354d852ef8SAndreas Herrmann * range of the next bitmap. This should not happen as 12364d852ef8SAndreas Herrmann * we don't allow this in __alloc_iova (at the 12374d852ef8SAndreas Herrmann * moment). 
12384d852ef8SAndreas Herrmann */ 12394d852ef8SAndreas Herrmann BUG(); 12404d852ef8SAndreas Herrmann } else 124168efd7d2SMarek Szyprowski count = size >> PAGE_SHIFT; 12424ce63fcdSMarek Szyprowski 12434ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 12444d852ef8SAndreas Herrmann bitmap_clear(mapping->bitmaps[bitmap_index], start, count); 12454ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 12464ce63fcdSMarek Szyprowski } 12474ce63fcdSMarek Szyprowski 124833298ef6SDoug Anderson /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */ 124933298ef6SDoug Anderson static const int iommu_order_array[] = { 9, 8, 4, 0 }; 125033298ef6SDoug Anderson 1251549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, 125200085f1eSKrzysztof Kozlowski gfp_t gfp, unsigned long attrs, 1253f1270896SGregory CLEMENT int coherent_flag) 12544ce63fcdSMarek Szyprowski { 12554ce63fcdSMarek Szyprowski struct page **pages; 12564ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 12574ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 12584ce63fcdSMarek Szyprowski int i = 0; 125933298ef6SDoug Anderson int order_idx = 0; 12604ce63fcdSMarek Szyprowski 12614ce63fcdSMarek Szyprowski if (array_size <= PAGE_SIZE) 126223be7fdaSAlexandre Courbot pages = kzalloc(array_size, GFP_KERNEL); 12634ce63fcdSMarek Szyprowski else 12644ce63fcdSMarek Szyprowski pages = vzalloc(array_size); 12654ce63fcdSMarek Szyprowski if (!pages) 12664ce63fcdSMarek Szyprowski return NULL; 12674ce63fcdSMarek Szyprowski 126800085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) 1269549a17e4SMarek Szyprowski { 1270549a17e4SMarek Szyprowski unsigned long order = get_order(size); 1271549a17e4SMarek Szyprowski struct page *page; 1272549a17e4SMarek Szyprowski 1273d834c5abSMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order, 1274d834c5abSMarek Szyprowski gfp & __GFP_NOWARN); 1275549a17e4SMarek Szyprowski if (!page) 1276549a17e4SMarek Szyprowski goto error; 1277549a17e4SMarek Szyprowski 1278f1270896SGregory CLEMENT __dma_clear_buffer(page, size, coherent_flag); 1279549a17e4SMarek Szyprowski 1280549a17e4SMarek Szyprowski for (i = 0; i < count; i++) 1281549a17e4SMarek Szyprowski pages[i] = page + i; 1282549a17e4SMarek Szyprowski 1283549a17e4SMarek Szyprowski return pages; 1284549a17e4SMarek Szyprowski } 1285549a17e4SMarek Szyprowski 128614d3ae2eSDoug Anderson /* Go straight to 4K chunks if caller says it's OK. 
*/ 128700085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) 128814d3ae2eSDoug Anderson order_idx = ARRAY_SIZE(iommu_order_array) - 1; 128914d3ae2eSDoug Anderson 1290f8669befSMarek Szyprowski /* 1291f8669befSMarek Szyprowski * IOMMU can map any pages, so highmem can also be used here 1292f8669befSMarek Szyprowski */ 1293f8669befSMarek Szyprowski gfp |= __GFP_NOWARN | __GFP_HIGHMEM; 1294f8669befSMarek Szyprowski 12954ce63fcdSMarek Szyprowski while (count) { 129649f28aa6STomasz Figa int j, order; 12974ce63fcdSMarek Szyprowski 129833298ef6SDoug Anderson order = iommu_order_array[order_idx]; 129933298ef6SDoug Anderson 130033298ef6SDoug Anderson /* Drop down when we get small */ 130133298ef6SDoug Anderson if (__fls(count) < order) { 130233298ef6SDoug Anderson order_idx++; 130333298ef6SDoug Anderson continue; 130449f28aa6STomasz Figa } 130549f28aa6STomasz Figa 130633298ef6SDoug Anderson if (order) { 130733298ef6SDoug Anderson /* See if it's easy to allocate a high-order chunk */ 130833298ef6SDoug Anderson pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); 130933298ef6SDoug Anderson 131033298ef6SDoug Anderson /* Go down a notch at first sign of pressure */ 131149f28aa6STomasz Figa if (!pages[i]) { 131233298ef6SDoug Anderson order_idx++; 131333298ef6SDoug Anderson continue; 131433298ef6SDoug Anderson } 131533298ef6SDoug Anderson } else { 131649f28aa6STomasz Figa pages[i] = alloc_pages(gfp, 0); 13174ce63fcdSMarek Szyprowski if (!pages[i]) 13184ce63fcdSMarek Szyprowski goto error; 131949f28aa6STomasz Figa } 13204ce63fcdSMarek Szyprowski 13215a796eebSHiroshi Doyu if (order) { 13224ce63fcdSMarek Szyprowski split_page(pages[i], order); 13234ce63fcdSMarek Szyprowski j = 1 << order; 13244ce63fcdSMarek Szyprowski while (--j) 13254ce63fcdSMarek Szyprowski pages[i + j] = pages[i] + j; 13265a796eebSHiroshi Doyu } 13274ce63fcdSMarek Szyprowski 1328f1270896SGregory CLEMENT __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag); 13294ce63fcdSMarek Szyprowski i += 1 << order; 13304ce63fcdSMarek Szyprowski count -= 1 << order; 13314ce63fcdSMarek Szyprowski } 13324ce63fcdSMarek Szyprowski 13334ce63fcdSMarek Szyprowski return pages; 13344ce63fcdSMarek Szyprowski error: 13359fa8af91SMarek Szyprowski while (i--) 13364ce63fcdSMarek Szyprowski if (pages[i]) 13374ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 13381d5cfdb0STetsuo Handa kvfree(pages); 13394ce63fcdSMarek Szyprowski return NULL; 13404ce63fcdSMarek Szyprowski } 13414ce63fcdSMarek Szyprowski 1342549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages, 134300085f1eSKrzysztof Kozlowski size_t size, unsigned long attrs) 13444ce63fcdSMarek Szyprowski { 13454ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 13464ce63fcdSMarek Szyprowski int i; 1347549a17e4SMarek Szyprowski 134800085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { 1349549a17e4SMarek Szyprowski dma_release_from_contiguous(dev, pages[0], count); 1350549a17e4SMarek Szyprowski } else { 13514ce63fcdSMarek Szyprowski for (i = 0; i < count; i++) 13524ce63fcdSMarek Szyprowski if (pages[i]) 13534ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 1354549a17e4SMarek Szyprowski } 1355549a17e4SMarek Szyprowski 13561d5cfdb0STetsuo Handa kvfree(pages); 13574ce63fcdSMarek Szyprowski return 0; 13584ce63fcdSMarek Szyprowski } 13594ce63fcdSMarek Szyprowski 13604ce63fcdSMarek Szyprowski /* 13614ce63fcdSMarek Szyprowski * Create a CPU mapping for the specified pages 13624ce63fcdSMarek Szyprowski */ 13634ce63fcdSMarek Szyprowski
static void * 1364e9da6e99SMarek Szyprowski __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, 1365e9da6e99SMarek Szyprowski const void *caller) 13664ce63fcdSMarek Szyprowski { 136751231740SChristoph Hellwig return dma_common_pages_remap(pages, size, prot, caller); 13684ce63fcdSMarek Szyprowski } 13694ce63fcdSMarek Szyprowski 13704ce63fcdSMarek Szyprowski /* 13714ce63fcdSMarek Szyprowski * Create a mapping in device IO address space for specified pages 13724ce63fcdSMarek Szyprowski */ 13734ce63fcdSMarek Szyprowski static dma_addr_t 13747d2822dfSSricharan R __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, 13757d2822dfSSricharan R unsigned long attrs) 13764ce63fcdSMarek Szyprowski { 137789cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13784ce63fcdSMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 13794ce63fcdSMarek Szyprowski dma_addr_t dma_addr, iova; 138090cde558SAndre Przywara int i; 13814ce63fcdSMarek Szyprowski 13824ce63fcdSMarek Szyprowski dma_addr = __alloc_iova(mapping, size); 138372fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 13844ce63fcdSMarek Szyprowski return dma_addr; 13854ce63fcdSMarek Szyprowski 13864ce63fcdSMarek Szyprowski iova = dma_addr; 13874ce63fcdSMarek Szyprowski for (i = 0; i < count; ) { 138890cde558SAndre Przywara int ret; 138990cde558SAndre Przywara 13904ce63fcdSMarek Szyprowski unsigned int next_pfn = page_to_pfn(pages[i]) + 1; 13914ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(pages[i]); 13924ce63fcdSMarek Szyprowski unsigned int len, j; 13934ce63fcdSMarek Szyprowski 13944ce63fcdSMarek Szyprowski for (j = i + 1; j < count; j++, next_pfn++) 13954ce63fcdSMarek Szyprowski if (page_to_pfn(pages[j]) != next_pfn) 13964ce63fcdSMarek Szyprowski break; 13974ce63fcdSMarek Szyprowski 13984ce63fcdSMarek Szyprowski len = (j - i) << PAGE_SHIFT; 1399c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, 14007d2822dfSSricharan R __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs)); 14014ce63fcdSMarek Szyprowski if (ret < 0) 14024ce63fcdSMarek Szyprowski goto fail; 14034ce63fcdSMarek Szyprowski iova += len; 14044ce63fcdSMarek Szyprowski i = j; 14054ce63fcdSMarek Szyprowski } 14064ce63fcdSMarek Szyprowski return dma_addr; 14074ce63fcdSMarek Szyprowski fail: 14084ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); 14094ce63fcdSMarek Szyprowski __free_iova(mapping, dma_addr, size); 141072fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 14114ce63fcdSMarek Szyprowski } 14124ce63fcdSMarek Szyprowski 14134ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 14144ce63fcdSMarek Szyprowski { 141589cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 14164ce63fcdSMarek Szyprowski 14174ce63fcdSMarek Szyprowski /* 14184ce63fcdSMarek Szyprowski * add optional in-page offset from iova to size and align 14194ce63fcdSMarek Szyprowski * result to page size 14204ce63fcdSMarek Szyprowski */ 14214ce63fcdSMarek Szyprowski size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 14224ce63fcdSMarek Szyprowski iova &= PAGE_MASK; 14234ce63fcdSMarek Szyprowski 14244ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, size); 14254ce63fcdSMarek Szyprowski __free_iova(mapping, iova, size); 14264ce63fcdSMarek Szyprowski return 0; 14274ce63fcdSMarek Szyprowski } 14284ce63fcdSMarek Szyprowski 1429665bad7bSHiroshi Doyu static struct page 
**__atomic_get_pages(void *addr) 1430665bad7bSHiroshi Doyu { 143136d0fd21SLaura Abbott struct page *page; 143236d0fd21SLaura Abbott phys_addr_t phys; 1433665bad7bSHiroshi Doyu 143436d0fd21SLaura Abbott phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr); 143536d0fd21SLaura Abbott page = phys_to_page(phys); 143636d0fd21SLaura Abbott 143736d0fd21SLaura Abbott return (struct page **)page; 1438665bad7bSHiroshi Doyu } 1439665bad7bSHiroshi Doyu 144000085f1eSKrzysztof Kozlowski static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) 1441e9da6e99SMarek Szyprowski { 1442665bad7bSHiroshi Doyu if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1443665bad7bSHiroshi Doyu return __atomic_get_pages(cpu_addr); 1444665bad7bSHiroshi Doyu 144500085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 1446955c757eSMarek Szyprowski return cpu_addr; 1447955c757eSMarek Szyprowski 14485cf45379SChristoph Hellwig return dma_common_find_pages(cpu_addr); 1449e9da6e99SMarek Szyprowski } 1450e9da6e99SMarek Szyprowski 145156506822SGregory CLEMENT static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, 14527d2822dfSSricharan R dma_addr_t *handle, int coherent_flag, 14537d2822dfSSricharan R unsigned long attrs) 1454479ed93aSHiroshi Doyu { 1455479ed93aSHiroshi Doyu struct page *page; 1456479ed93aSHiroshi Doyu void *addr; 1457479ed93aSHiroshi Doyu 145856506822SGregory CLEMENT if (coherent_flag == COHERENT) 145956506822SGregory CLEMENT addr = __alloc_simple_buffer(dev, size, gfp, &page); 146056506822SGregory CLEMENT else 1461479ed93aSHiroshi Doyu addr = __alloc_from_pool(size, &page); 1462479ed93aSHiroshi Doyu if (!addr) 1463479ed93aSHiroshi Doyu return NULL; 1464479ed93aSHiroshi Doyu 14657d2822dfSSricharan R *handle = __iommu_create_mapping(dev, &page, size, attrs); 146672fd97bfSChristoph Hellwig if (*handle == DMA_MAPPING_ERROR) 1467479ed93aSHiroshi Doyu goto err_mapping; 1468479ed93aSHiroshi Doyu 1469479ed93aSHiroshi Doyu return addr; 1470479ed93aSHiroshi Doyu 1471479ed93aSHiroshi Doyu err_mapping: 1472479ed93aSHiroshi Doyu __free_from_pool(addr, size); 1473479ed93aSHiroshi Doyu return NULL; 1474479ed93aSHiroshi Doyu } 1475479ed93aSHiroshi Doyu 1476d5898291SMarek Szyprowski static void __iommu_free_atomic(struct device *dev, void *cpu_addr, 147756506822SGregory CLEMENT dma_addr_t handle, size_t size, int coherent_flag) 1478479ed93aSHiroshi Doyu { 1479479ed93aSHiroshi Doyu __iommu_remove_mapping(dev, handle, size); 148056506822SGregory CLEMENT if (coherent_flag == COHERENT) 148156506822SGregory CLEMENT __dma_free_buffer(virt_to_page(cpu_addr), size); 148256506822SGregory CLEMENT else 1483d5898291SMarek Szyprowski __free_from_pool(cpu_addr, size); 1484479ed93aSHiroshi Doyu } 1485479ed93aSHiroshi Doyu 148656506822SGregory CLEMENT static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, 148700085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs, 148856506822SGregory CLEMENT int coherent_flag) 14894ce63fcdSMarek Szyprowski { 149071b55663SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 14914ce63fcdSMarek Szyprowski struct page **pages; 14924ce63fcdSMarek Szyprowski void *addr = NULL; 14934ce63fcdSMarek Szyprowski 149472fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 14954ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 14964ce63fcdSMarek Szyprowski 149756506822SGregory CLEMENT if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) 149856506822SGregory CLEMENT return __iommu_alloc_simple(dev, size, gfp, 
handle, 14997d2822dfSSricharan R coherent_flag, attrs); 1500479ed93aSHiroshi Doyu 15015b91a98cSRichard Zhao /* 15025b91a98cSRichard Zhao * Following is a work-around (a.k.a. hack) to prevent pages 15035b91a98cSRichard Zhao * with __GFP_COMP being passed to split_page() which cannot 15045b91a98cSRichard Zhao * handle them. The real problem is that this flag probably 15055b91a98cSRichard Zhao * should be 0 on ARM as it is not supported on this 15065b91a98cSRichard Zhao * platform; see CONFIG_HUGETLBFS. 15075b91a98cSRichard Zhao */ 15085b91a98cSRichard Zhao gfp &= ~(__GFP_COMP); 15095b91a98cSRichard Zhao 151056506822SGregory CLEMENT pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag); 15114ce63fcdSMarek Szyprowski if (!pages) 15124ce63fcdSMarek Szyprowski return NULL; 15134ce63fcdSMarek Szyprowski 15147d2822dfSSricharan R *handle = __iommu_create_mapping(dev, pages, size, attrs); 151572fd97bfSChristoph Hellwig if (*handle == DMA_MAPPING_ERROR) 15164ce63fcdSMarek Szyprowski goto err_buffer; 15174ce63fcdSMarek Szyprowski 151800085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 1519955c757eSMarek Szyprowski return pages; 1520955c757eSMarek Szyprowski 1521e9da6e99SMarek Szyprowski addr = __iommu_alloc_remap(pages, size, gfp, prot, 1522e9da6e99SMarek Szyprowski __builtin_return_address(0)); 15234ce63fcdSMarek Szyprowski if (!addr) 15244ce63fcdSMarek Szyprowski goto err_mapping; 15254ce63fcdSMarek Szyprowski 15264ce63fcdSMarek Szyprowski return addr; 15274ce63fcdSMarek Szyprowski 15284ce63fcdSMarek Szyprowski err_mapping: 15294ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, *handle, size); 15304ce63fcdSMarek Szyprowski err_buffer: 1531549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 15324ce63fcdSMarek Szyprowski return NULL; 15334ce63fcdSMarek Szyprowski } 15344ce63fcdSMarek Szyprowski 153556506822SGregory CLEMENT static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 153600085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 153756506822SGregory CLEMENT { 153856506822SGregory CLEMENT return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); 153956506822SGregory CLEMENT } 154056506822SGregory CLEMENT 154156506822SGregory CLEMENT static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, 154200085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 154356506822SGregory CLEMENT { 154456506822SGregory CLEMENT return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); 154556506822SGregory CLEMENT } 154656506822SGregory CLEMENT 154756506822SGregory CLEMENT static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 15484ce63fcdSMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 154900085f1eSKrzysztof Kozlowski unsigned long attrs) 15504ce63fcdSMarek Szyprowski { 1551955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1552371f0f08SMarek Szyprowski unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 15536248461dSSouptick Joarder int err; 1554e9da6e99SMarek Szyprowski 1555e9da6e99SMarek Szyprowski if (!pages) 1556e9da6e99SMarek Szyprowski return -ENXIO; 15574ce63fcdSMarek Szyprowski 15586248461dSSouptick Joarder if (vma->vm_pgoff >= nr_pages) 1559371f0f08SMarek Szyprowski return -ENXIO; 1560371f0f08SMarek Szyprowski 15616248461dSSouptick Joarder err = vm_map_pages(vma, pages, nr_pages); 15626248461dSSouptick Joarder if (err) 15636248461dSSouptick Joarder 
pr_err("Remapping memory failed: %d\n", err); 15647e312103SMarek Szyprowski 15656248461dSSouptick Joarder return err; 15664ce63fcdSMarek Szyprowski } 156756506822SGregory CLEMENT static int arm_iommu_mmap_attrs(struct device *dev, 156856506822SGregory CLEMENT struct vm_area_struct *vma, void *cpu_addr, 156900085f1eSKrzysztof Kozlowski dma_addr_t dma_addr, size_t size, unsigned long attrs) 157056506822SGregory CLEMENT { 157156506822SGregory CLEMENT vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 157256506822SGregory CLEMENT 157356506822SGregory CLEMENT return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 157456506822SGregory CLEMENT } 157556506822SGregory CLEMENT 157656506822SGregory CLEMENT static int arm_coherent_iommu_mmap_attrs(struct device *dev, 157756506822SGregory CLEMENT struct vm_area_struct *vma, void *cpu_addr, 157800085f1eSKrzysztof Kozlowski dma_addr_t dma_addr, size_t size, unsigned long attrs) 157956506822SGregory CLEMENT { 158056506822SGregory CLEMENT return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 158156506822SGregory CLEMENT } 15824ce63fcdSMarek Szyprowski 15834ce63fcdSMarek Szyprowski /* 15844ce63fcdSMarek Szyprowski * free a page as defined by the above mapping. 15854ce63fcdSMarek Szyprowski * Must not be called with IRQs disabled. 15864ce63fcdSMarek Szyprowski */ 158756506822SGregory CLEMENT void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 158800085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs, int coherent_flag) 15894ce63fcdSMarek Szyprowski { 1590836bfa0dSYoungJun Cho struct page **pages; 15914ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 15924ce63fcdSMarek Szyprowski 159356506822SGregory CLEMENT if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { 159456506822SGregory CLEMENT __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); 1595479ed93aSHiroshi Doyu return; 1596479ed93aSHiroshi Doyu } 1597479ed93aSHiroshi Doyu 1598836bfa0dSYoungJun Cho pages = __iommu_get_pages(cpu_addr, attrs); 1599836bfa0dSYoungJun Cho if (!pages) { 1600836bfa0dSYoungJun Cho WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1601836bfa0dSYoungJun Cho return; 1602836bfa0dSYoungJun Cho } 1603836bfa0dSYoungJun Cho 1604fe9041c2SChristoph Hellwig if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) 160551231740SChristoph Hellwig dma_common_free_remap(cpu_addr, size); 1606e9da6e99SMarek Szyprowski 16074ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size); 1608549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 16094ce63fcdSMarek Szyprowski } 16104ce63fcdSMarek Szyprowski 161156506822SGregory CLEMENT void arm_iommu_free_attrs(struct device *dev, size_t size, 161200085f1eSKrzysztof Kozlowski void *cpu_addr, dma_addr_t handle, unsigned long attrs) 161356506822SGregory CLEMENT { 161456506822SGregory CLEMENT __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); 161556506822SGregory CLEMENT } 161656506822SGregory CLEMENT 161756506822SGregory CLEMENT void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, 161800085f1eSKrzysztof Kozlowski void *cpu_addr, dma_addr_t handle, unsigned long attrs) 161956506822SGregory CLEMENT { 162056506822SGregory CLEMENT __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); 162156506822SGregory CLEMENT } 162256506822SGregory CLEMENT 1623dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 1624dc2832e1SMarek 
Szyprowski void *cpu_addr, dma_addr_t dma_addr, 162500085f1eSKrzysztof Kozlowski size_t size, unsigned long attrs) 1626dc2832e1SMarek Szyprowski { 1627dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1628dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1629dc2832e1SMarek Szyprowski 1630dc2832e1SMarek Szyprowski if (!pages) 1631dc2832e1SMarek Szyprowski return -ENXIO; 1632dc2832e1SMarek Szyprowski 1633dc2832e1SMarek Szyprowski return sg_alloc_table_from_pages(sgt, pages, count, 0, size, 1634dc2832e1SMarek Szyprowski GFP_KERNEL); 16354ce63fcdSMarek Szyprowski } 16364ce63fcdSMarek Szyprowski 16374ce63fcdSMarek Szyprowski /* 16384ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 16394ce63fcdSMarek Szyprowski */ 16404ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 16414ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 164200085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs, 16430fa478dfSRob Herring bool is_coherent) 16444ce63fcdSMarek Szyprowski { 164589cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 16464ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 16474ce63fcdSMarek Szyprowski int ret = 0; 16484ce63fcdSMarek Szyprowski unsigned int count; 16494ce63fcdSMarek Szyprowski struct scatterlist *s; 1650c9b24996SAndreas Herrmann int prot; 16514ce63fcdSMarek Szyprowski 16524ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 165372fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 16544ce63fcdSMarek Szyprowski 16554ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 165672fd97bfSChristoph Hellwig if (iova == DMA_MAPPING_ERROR) 16574ce63fcdSMarek Szyprowski return -ENOMEM; 16584ce63fcdSMarek Szyprowski 16594ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 16603e6110fdSDan Williams phys_addr_t phys = page_to_phys(sg_page(s)); 16614ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 16624ce63fcdSMarek Szyprowski 166300085f1eSKrzysztof Kozlowski if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 16644ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 16654ce63fcdSMarek Szyprowski 16667d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs); 1667c9b24996SAndreas Herrmann 1668c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, prot); 16694ce63fcdSMarek Szyprowski if (ret < 0) 16704ce63fcdSMarek Szyprowski goto fail; 16714ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 16724ce63fcdSMarek Szyprowski iova += len; 16734ce63fcdSMarek Szyprowski } 16744ce63fcdSMarek Szyprowski *handle = iova_base; 16754ce63fcdSMarek Szyprowski 16764ce63fcdSMarek Szyprowski return 0; 16774ce63fcdSMarek Szyprowski fail: 16784ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 16794ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 16804ce63fcdSMarek Szyprowski return ret; 16814ce63fcdSMarek Szyprowski } 16824ce63fcdSMarek Szyprowski 16830fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 168400085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs, 16850fa478dfSRob Herring bool is_coherent) 16864ce63fcdSMarek Szyprowski { 16874ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 
16884ce63fcdSMarek Szyprowski int i, count = 0; 16894ce63fcdSMarek Szyprowski unsigned int offset = s->offset; 16904ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 16914ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 16924ce63fcdSMarek Szyprowski 16934ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 16944ce63fcdSMarek Szyprowski s = sg_next(s); 16954ce63fcdSMarek Szyprowski 169672fd97bfSChristoph Hellwig s->dma_address = DMA_MAPPING_ERROR; 16974ce63fcdSMarek Szyprowski s->dma_length = 0; 16984ce63fcdSMarek Szyprowski 16994ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 17004ce63fcdSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, 17010fa478dfSRob Herring dir, attrs, is_coherent) < 0) 17024ce63fcdSMarek Szyprowski goto bad_mapping; 17034ce63fcdSMarek Szyprowski 17044ce63fcdSMarek Szyprowski dma->dma_address += offset; 17054ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 17064ce63fcdSMarek Szyprowski 17074ce63fcdSMarek Szyprowski size = offset = s->offset; 17084ce63fcdSMarek Szyprowski start = s; 17094ce63fcdSMarek Szyprowski dma = sg_next(dma); 17104ce63fcdSMarek Szyprowski count += 1; 17114ce63fcdSMarek Szyprowski } 17124ce63fcdSMarek Szyprowski size += s->length; 17134ce63fcdSMarek Szyprowski } 17140fa478dfSRob Herring if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 17150fa478dfSRob Herring is_coherent) < 0) 17164ce63fcdSMarek Szyprowski goto bad_mapping; 17174ce63fcdSMarek Szyprowski 17184ce63fcdSMarek Szyprowski dma->dma_address += offset; 17194ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 17204ce63fcdSMarek Szyprowski 17214ce63fcdSMarek Szyprowski return count+1; 17224ce63fcdSMarek Szyprowski 17234ce63fcdSMarek Szyprowski bad_mapping: 17244ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 17254ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 17264ce63fcdSMarek Szyprowski return 0; 17274ce63fcdSMarek Szyprowski } 17284ce63fcdSMarek Szyprowski 17294ce63fcdSMarek Szyprowski /** 17300fa478dfSRob Herring * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA 17310fa478dfSRob Herring * @dev: valid struct device pointer 17320fa478dfSRob Herring * @sg: list of buffers 17330fa478dfSRob Herring * @nents: number of buffers to map 17340fa478dfSRob Herring * @dir: DMA transfer direction 17350fa478dfSRob Herring * 17360fa478dfSRob Herring * Map a set of i/o coherent buffers described by scatterlist in streaming 17370fa478dfSRob Herring * mode for DMA. The scatter gather list elements are merged together (if 17380fa478dfSRob Herring * possible) and tagged with the appropriate dma address and length. They are 17390fa478dfSRob Herring * obtained via sg_dma_{address,length}. 
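 *
 * Hypothetical driver-side sketch: this op is not called directly but is
 * reached through the generic DMA API once an IOMMU mapping is attached
 * to the device (sgl/nents describe an already built scatterlist):
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (n == 0)
 *		return -ENOMEM;
 *	(program the device with sg_dma_address()/sg_dma_len() of the
 *	 first n, possibly merged, entries)
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);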
17400fa478dfSRob Herring */ 17410fa478dfSRob Herring int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 174200085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, unsigned long attrs) 17430fa478dfSRob Herring { 17440fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 17450fa478dfSRob Herring } 17460fa478dfSRob Herring 17470fa478dfSRob Herring /** 17480fa478dfSRob Herring * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 17490fa478dfSRob Herring * @dev: valid struct device pointer 17500fa478dfSRob Herring * @sg: list of buffers 17510fa478dfSRob Herring * @nents: number of buffers to map 17520fa478dfSRob Herring * @dir: DMA transfer direction 17530fa478dfSRob Herring * 17540fa478dfSRob Herring * Map a set of buffers described by scatterlist in streaming mode for DMA. 17550fa478dfSRob Herring * The scatter gather list elements are merged together (if possible) and 17560fa478dfSRob Herring * tagged with the appropriate dma address and length. They are obtained via 17570fa478dfSRob Herring * sg_dma_{address,length}. 17580fa478dfSRob Herring */ 17590fa478dfSRob Herring int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 176000085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, unsigned long attrs) 17610fa478dfSRob Herring { 17620fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 17630fa478dfSRob Herring } 17640fa478dfSRob Herring 17650fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 176600085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, 176700085f1eSKrzysztof Kozlowski unsigned long attrs, bool is_coherent) 17680fa478dfSRob Herring { 17690fa478dfSRob Herring struct scatterlist *s; 17700fa478dfSRob Herring int i; 17710fa478dfSRob Herring 17720fa478dfSRob Herring for_each_sg(sg, s, nents, i) { 17730fa478dfSRob Herring if (sg_dma_len(s)) 17740fa478dfSRob Herring __iommu_remove_mapping(dev, sg_dma_address(s), 17750fa478dfSRob Herring sg_dma_len(s)); 177600085f1eSKrzysztof Kozlowski if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 17770fa478dfSRob Herring __dma_page_dev_to_cpu(sg_page(s), s->offset, 17780fa478dfSRob Herring s->length, dir); 17790fa478dfSRob Herring } 17800fa478dfSRob Herring } 17810fa478dfSRob Herring 17820fa478dfSRob Herring /** 17830fa478dfSRob Herring * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 17840fa478dfSRob Herring * @dev: valid struct device pointer 17850fa478dfSRob Herring * @sg: list of buffers 17860fa478dfSRob Herring * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 17870fa478dfSRob Herring * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17880fa478dfSRob Herring * 17890fa478dfSRob Herring * Unmap a set of streaming mode DMA translations. Again, CPU access 17900fa478dfSRob Herring * rules concerning calls here are the same as for dma_unmap_single(). 
17910fa478dfSRob Herring */ 17920fa478dfSRob Herring void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 179300085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, 179400085f1eSKrzysztof Kozlowski unsigned long attrs) 17950fa478dfSRob Herring { 17960fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 17970fa478dfSRob Herring } 17980fa478dfSRob Herring 17990fa478dfSRob Herring /** 18004ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 18014ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18024ce63fcdSMarek Szyprowski * @sg: list of buffers 18034ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 18044ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 18054ce63fcdSMarek Szyprowski * 18064ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 18074ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 18084ce63fcdSMarek Szyprowski */ 18094ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 181000085f1eSKrzysztof Kozlowski enum dma_data_direction dir, 181100085f1eSKrzysztof Kozlowski unsigned long attrs) 18124ce63fcdSMarek Szyprowski { 18130fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 18144ce63fcdSMarek Szyprowski } 18154ce63fcdSMarek Szyprowski 18164ce63fcdSMarek Szyprowski /** 18174ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 18184ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18194ce63fcdSMarek Szyprowski * @sg: list of buffers 18204ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 18214ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 18224ce63fcdSMarek Szyprowski */ 18234ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 18244ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 18254ce63fcdSMarek Szyprowski { 18264ce63fcdSMarek Szyprowski struct scatterlist *s; 18274ce63fcdSMarek Szyprowski int i; 18284ce63fcdSMarek Szyprowski 18294ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 18304ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 18314ce63fcdSMarek Szyprowski 18324ce63fcdSMarek Szyprowski } 18334ce63fcdSMarek Szyprowski 18344ce63fcdSMarek Szyprowski /** 18354ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 18364ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18374ce63fcdSMarek Szyprowski * @sg: list of buffers 18384ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 18394ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 18404ce63fcdSMarek Szyprowski */ 18414ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 18424ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 18434ce63fcdSMarek Szyprowski { 18444ce63fcdSMarek Szyprowski struct scatterlist *s; 18454ce63fcdSMarek Szyprowski int i; 18464ce63fcdSMarek Szyprowski 18474ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 18484ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 18494ce63fcdSMarek Szyprowski } 18504ce63fcdSMarek Szyprowski 18514ce63fcdSMarek Szyprowski 18524ce63fcdSMarek 
Szyprowski /** 18530fa478dfSRob Herring * arm_coherent_iommu_map_page 18540fa478dfSRob Herring * @dev: valid struct device pointer 18550fa478dfSRob Herring * @page: page that buffer resides in 18560fa478dfSRob Herring * @offset: offset into page for start of buffer 18570fa478dfSRob Herring * @size: size of buffer to map 18580fa478dfSRob Herring * @dir: DMA transfer direction 18590fa478dfSRob Herring * 18600fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_map_page() 18610fa478dfSRob Herring */ 18620fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 18630fa478dfSRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 186400085f1eSKrzysztof Kozlowski unsigned long attrs) 18650fa478dfSRob Herring { 186689cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18670fa478dfSRob Herring dma_addr_t dma_addr; 186813987d68SWill Deacon int ret, prot, len = PAGE_ALIGN(size + offset); 18690fa478dfSRob Herring 18700fa478dfSRob Herring dma_addr = __alloc_iova(mapping, len); 187172fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 18720fa478dfSRob Herring return dma_addr; 18730fa478dfSRob Herring 18747d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs); 187513987d68SWill Deacon 187613987d68SWill Deacon ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 18770fa478dfSRob Herring if (ret < 0) 18780fa478dfSRob Herring goto fail; 18790fa478dfSRob Herring 18800fa478dfSRob Herring return dma_addr + offset; 18810fa478dfSRob Herring fail: 18820fa478dfSRob Herring __free_iova(mapping, dma_addr, len); 188372fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 18840fa478dfSRob Herring } 18850fa478dfSRob Herring 18860fa478dfSRob Herring /** 18874ce63fcdSMarek Szyprowski * arm_iommu_map_page 18884ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18894ce63fcdSMarek Szyprowski * @page: page that buffer resides in 18904ce63fcdSMarek Szyprowski * @offset: offset into page for start of buffer 18914ce63fcdSMarek Szyprowski * @size: size of buffer to map 18924ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 18934ce63fcdSMarek Szyprowski * 18944ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_map_page() 18954ce63fcdSMarek Szyprowski */ 18964ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 18974ce63fcdSMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 189800085f1eSKrzysztof Kozlowski unsigned long attrs) 18994ce63fcdSMarek Szyprowski { 190000085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 19014ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 19024ce63fcdSMarek Szyprowski 19030fa478dfSRob Herring return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); 19040fa478dfSRob Herring } 19054ce63fcdSMarek Szyprowski 19060fa478dfSRob Herring /** 19070fa478dfSRob Herring * arm_coherent_iommu_unmap_page 19080fa478dfSRob Herring * @dev: valid struct device pointer 19090fa478dfSRob Herring * @handle: DMA address of buffer 19100fa478dfSRob Herring * @size: size of buffer (same as passed to dma_map_page) 19110fa478dfSRob Herring * @dir: DMA transfer direction (same as passed to dma_map_page) 19120fa478dfSRob Herring * 19130fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_unmap_page() 19140fa478dfSRob Herring */ 19150fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 
191600085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 19170fa478dfSRob Herring { 191889cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19190fa478dfSRob Herring dma_addr_t iova = handle & PAGE_MASK; 19200fa478dfSRob Herring int offset = handle & ~PAGE_MASK; 19210fa478dfSRob Herring int len = PAGE_ALIGN(size + offset); 19224ce63fcdSMarek Szyprowski 19230fa478dfSRob Herring if (!iova) 19240fa478dfSRob Herring return; 19250fa478dfSRob Herring 19260fa478dfSRob Herring iommu_unmap(mapping->domain, iova, len); 19270fa478dfSRob Herring __free_iova(mapping, iova, len); 19284ce63fcdSMarek Szyprowski } 19294ce63fcdSMarek Szyprowski 19304ce63fcdSMarek Szyprowski /** 19314ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 19324ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 19334ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 19344ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 19354ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 19364ce63fcdSMarek Szyprowski * 19374ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 19384ce63fcdSMarek Szyprowski */ 19394ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 194000085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 19414ce63fcdSMarek Szyprowski { 194289cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19434ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19444ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19454ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 19464ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 19474ce63fcdSMarek Szyprowski 19484ce63fcdSMarek Szyprowski if (!iova) 19494ce63fcdSMarek Szyprowski return; 19504ce63fcdSMarek Szyprowski 195100085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 19524ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 19534ce63fcdSMarek Szyprowski 19544ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 19554ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 19564ce63fcdSMarek Szyprowski } 19574ce63fcdSMarek Szyprowski 195824ed5d2cSNiklas Söderlund /** 195924ed5d2cSNiklas Söderlund * arm_iommu_map_resource - map a device resource for DMA 196024ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 196124ed5d2cSNiklas Söderlund * @phys_addr: physical address of resource 196224ed5d2cSNiklas Söderlund * @size: size of resource to map 196324ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 196424ed5d2cSNiklas Söderlund */ 196524ed5d2cSNiklas Söderlund static dma_addr_t arm_iommu_map_resource(struct device *dev, 196624ed5d2cSNiklas Söderlund phys_addr_t phys_addr, size_t size, 196724ed5d2cSNiklas Söderlund enum dma_data_direction dir, unsigned long attrs) 196824ed5d2cSNiklas Söderlund { 196924ed5d2cSNiklas Söderlund struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 197024ed5d2cSNiklas Söderlund dma_addr_t dma_addr; 197124ed5d2cSNiklas Söderlund int ret, prot; 197224ed5d2cSNiklas Söderlund phys_addr_t addr = phys_addr & PAGE_MASK; 197324ed5d2cSNiklas Söderlund unsigned int offset = phys_addr & ~PAGE_MASK; 197424ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 197524ed5d2cSNiklas Söderlund 197624ed5d2cSNiklas Söderlund dma_addr = 
__alloc_iova(mapping, len); 197772fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 197824ed5d2cSNiklas Söderlund return dma_addr; 197924ed5d2cSNiklas Söderlund 19807d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; 198124ed5d2cSNiklas Söderlund 198224ed5d2cSNiklas Söderlund ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); 198324ed5d2cSNiklas Söderlund if (ret < 0) 198424ed5d2cSNiklas Söderlund goto fail; 198524ed5d2cSNiklas Söderlund 198624ed5d2cSNiklas Söderlund return dma_addr + offset; 198724ed5d2cSNiklas Söderlund fail: 198824ed5d2cSNiklas Söderlund __free_iova(mapping, dma_addr, len); 198972fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 199024ed5d2cSNiklas Söderlund } 199124ed5d2cSNiklas Söderlund 199224ed5d2cSNiklas Söderlund /** 199324ed5d2cSNiklas Söderlund * arm_iommu_unmap_resource - unmap a device DMA resource 199424ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 199524ed5d2cSNiklas Söderlund * @dma_handle: DMA address to resource 199624ed5d2cSNiklas Söderlund * @size: size of resource to map 199724ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 199824ed5d2cSNiklas Söderlund */ 199924ed5d2cSNiklas Söderlund static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle, 200024ed5d2cSNiklas Söderlund size_t size, enum dma_data_direction dir, 200124ed5d2cSNiklas Söderlund unsigned long attrs) 200224ed5d2cSNiklas Söderlund { 200324ed5d2cSNiklas Söderlund struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 200424ed5d2cSNiklas Söderlund dma_addr_t iova = dma_handle & PAGE_MASK; 200524ed5d2cSNiklas Söderlund unsigned int offset = dma_handle & ~PAGE_MASK; 200624ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 200724ed5d2cSNiklas Söderlund 200824ed5d2cSNiklas Söderlund if (!iova) 200924ed5d2cSNiklas Söderlund return; 201024ed5d2cSNiklas Söderlund 201124ed5d2cSNiklas Söderlund iommu_unmap(mapping->domain, iova, len); 201224ed5d2cSNiklas Söderlund __free_iova(mapping, iova, len); 201324ed5d2cSNiklas Söderlund } 201424ed5d2cSNiklas Söderlund 20154ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 20164ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 20174ce63fcdSMarek Szyprowski { 201889cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 20194ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 20204ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 20214ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 20224ce63fcdSMarek Szyprowski 20234ce63fcdSMarek Szyprowski if (!iova) 20244ce63fcdSMarek Szyprowski return; 20254ce63fcdSMarek Szyprowski 20264ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 20274ce63fcdSMarek Szyprowski } 20284ce63fcdSMarek Szyprowski 20294ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 20304ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 20314ce63fcdSMarek Szyprowski { 203289cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 20334ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 20344ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 20354ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 20364ce63fcdSMarek Szyprowski 20374ce63fcdSMarek Szyprowski if (!iova) 
20384ce63fcdSMarek Szyprowski return; 20394ce63fcdSMarek Szyprowski 20404ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 20414ce63fcdSMarek Szyprowski } 20424ce63fcdSMarek Szyprowski 20435299709dSBart Van Assche const struct dma_map_ops iommu_ops = { 20444ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 20454ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 20464ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 2047dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 20484ce63fcdSMarek Szyprowski 20494ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 20504ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 20514ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 20524ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device, 20534ce63fcdSMarek Szyprowski 20544ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 20554ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 20564ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 20574ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 205824ed5d2cSNiklas Söderlund 205924ed5d2cSNiklas Söderlund .map_resource = arm_iommu_map_resource, 206024ed5d2cSNiklas Söderlund .unmap_resource = arm_iommu_unmap_resource, 20619eef8b8cSChristoph Hellwig 2062418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 20634ce63fcdSMarek Szyprowski }; 20644ce63fcdSMarek Szyprowski 20655299709dSBart Van Assche const struct dma_map_ops iommu_coherent_ops = { 206656506822SGregory CLEMENT .alloc = arm_coherent_iommu_alloc_attrs, 206756506822SGregory CLEMENT .free = arm_coherent_iommu_free_attrs, 206856506822SGregory CLEMENT .mmap = arm_coherent_iommu_mmap_attrs, 20690fa478dfSRob Herring .get_sgtable = arm_iommu_get_sgtable, 20700fa478dfSRob Herring 20710fa478dfSRob Herring .map_page = arm_coherent_iommu_map_page, 20720fa478dfSRob Herring .unmap_page = arm_coherent_iommu_unmap_page, 20730fa478dfSRob Herring 20740fa478dfSRob Herring .map_sg = arm_coherent_iommu_map_sg, 20750fa478dfSRob Herring .unmap_sg = arm_coherent_iommu_unmap_sg, 207624ed5d2cSNiklas Söderlund 207724ed5d2cSNiklas Söderlund .map_resource = arm_iommu_map_resource, 207824ed5d2cSNiklas Söderlund .unmap_resource = arm_iommu_unmap_resource, 20799eef8b8cSChristoph Hellwig 2080418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 20810fa478dfSRob Herring }; 20820fa478dfSRob Herring 20834ce63fcdSMarek Szyprowski /** 20844ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 20854ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 20864ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 208768efd7d2SMarek Szyprowski * @size: maximum size of the valid IO address space 20884ce63fcdSMarek Szyprowski * 20894ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 20904ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 20914ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 20924ce63fcdSMarek Szyprowski * 20934ce63fcdSMarek Szyprowski * The client device needs to be attached to the mapping with 20944ce63fcdSMarek Szyprowski * the arm_iommu_attach_device() function.
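 *
 * Minimal, hypothetical usage sketch (the base address and size below are
 * purely illustrative):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_64M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}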
20954ce63fcdSMarek Szyprowski */ 20964ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 20971424532bSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) 20984ce63fcdSMarek Szyprowski { 209968efd7d2SMarek Szyprowski unsigned int bits = size >> PAGE_SHIFT; 210068efd7d2SMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 21014ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 210268efd7d2SMarek Szyprowski int extensions = 1; 21034ce63fcdSMarek Szyprowski int err = -ENOMEM; 21044ce63fcdSMarek Szyprowski 21051424532bSMarek Szyprowski /* currently only 32-bit DMA address space is supported */ 21061424532bSMarek Szyprowski if (size > DMA_BIT_MASK(32) + 1) 21071424532bSMarek Szyprowski return ERR_PTR(-ERANGE); 21081424532bSMarek Szyprowski 210968efd7d2SMarek Szyprowski if (!bitmap_size) 21104ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 21114ce63fcdSMarek Szyprowski 211268efd7d2SMarek Szyprowski if (bitmap_size > PAGE_SIZE) { 211368efd7d2SMarek Szyprowski extensions = bitmap_size / PAGE_SIZE; 211468efd7d2SMarek Szyprowski bitmap_size = PAGE_SIZE; 211568efd7d2SMarek Szyprowski } 211668efd7d2SMarek Szyprowski 21174ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 21184ce63fcdSMarek Szyprowski if (!mapping) 21194ce63fcdSMarek Szyprowski goto err; 21204ce63fcdSMarek Szyprowski 212168efd7d2SMarek Szyprowski mapping->bitmap_size = bitmap_size; 21226396bb22SKees Cook mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), 21234d852ef8SAndreas Herrmann GFP_KERNEL); 21244d852ef8SAndreas Herrmann if (!mapping->bitmaps) 21254ce63fcdSMarek Szyprowski goto err2; 21264ce63fcdSMarek Szyprowski 212768efd7d2SMarek Szyprowski mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); 21284d852ef8SAndreas Herrmann if (!mapping->bitmaps[0]) 21294d852ef8SAndreas Herrmann goto err3; 21304d852ef8SAndreas Herrmann 21314d852ef8SAndreas Herrmann mapping->nr_bitmaps = 1; 21324d852ef8SAndreas Herrmann mapping->extensions = extensions; 21334ce63fcdSMarek Szyprowski mapping->base = base; 213468efd7d2SMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 21354d852ef8SAndreas Herrmann 21364ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 21374ce63fcdSMarek Szyprowski 21384ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 21394ce63fcdSMarek Szyprowski if (!mapping->domain) 21404d852ef8SAndreas Herrmann goto err4; 21414ce63fcdSMarek Szyprowski 21424ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 21434ce63fcdSMarek Szyprowski return mapping; 21444d852ef8SAndreas Herrmann err4: 21454d852ef8SAndreas Herrmann kfree(mapping->bitmaps[0]); 21464ce63fcdSMarek Szyprowski err3: 21474d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 21484ce63fcdSMarek Szyprowski err2: 21494ce63fcdSMarek Szyprowski kfree(mapping); 21504ce63fcdSMarek Szyprowski err: 21514ce63fcdSMarek Szyprowski return ERR_PTR(err); 21524ce63fcdSMarek Szyprowski } 215318177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 21544ce63fcdSMarek Szyprowski 21554ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 21564ce63fcdSMarek Szyprowski { 21574d852ef8SAndreas Herrmann int i; 21584ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 21594ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 21604ce63fcdSMarek Szyprowski 21614ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 21624d852ef8SAndreas Herrmann for (i = 0; i < 
21554ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref)
21564ce63fcdSMarek Szyprowski {
21574d852ef8SAndreas Herrmann         int i;
21584ce63fcdSMarek Szyprowski         struct dma_iommu_mapping *mapping =
21594ce63fcdSMarek Szyprowski                 container_of(kref, struct dma_iommu_mapping, kref);
21604ce63fcdSMarek Szyprowski
21614ce63fcdSMarek Szyprowski         iommu_domain_free(mapping->domain);
21624d852ef8SAndreas Herrmann         for (i = 0; i < mapping->nr_bitmaps; i++)
21634d852ef8SAndreas Herrmann                 kfree(mapping->bitmaps[i]);
21644d852ef8SAndreas Herrmann         kfree(mapping->bitmaps);
21654ce63fcdSMarek Szyprowski         kfree(mapping);
21664ce63fcdSMarek Szyprowski }
21674ce63fcdSMarek Szyprowski
21684d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
21694d852ef8SAndreas Herrmann {
21704d852ef8SAndreas Herrmann         int next_bitmap;
21714d852ef8SAndreas Herrmann
2172462859aaSMarek Szyprowski         if (mapping->nr_bitmaps >= mapping->extensions)
21734d852ef8SAndreas Herrmann                 return -EINVAL;
21744d852ef8SAndreas Herrmann
21754d852ef8SAndreas Herrmann         next_bitmap = mapping->nr_bitmaps;
21764d852ef8SAndreas Herrmann         mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
21774d852ef8SAndreas Herrmann                                                 GFP_ATOMIC);
21784d852ef8SAndreas Herrmann         if (!mapping->bitmaps[next_bitmap])
21794d852ef8SAndreas Herrmann                 return -ENOMEM;
21804d852ef8SAndreas Herrmann
21814d852ef8SAndreas Herrmann         mapping->nr_bitmaps++;
21824d852ef8SAndreas Herrmann
21834d852ef8SAndreas Herrmann         return 0;
21844d852ef8SAndreas Herrmann }
21854d852ef8SAndreas Herrmann
21864ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
21874ce63fcdSMarek Szyprowski {
21884ce63fcdSMarek Szyprowski         if (mapping)
21894ce63fcdSMarek Szyprowski                 kref_put(&mapping->kref, release_iommu_mapping);
21904ce63fcdSMarek Szyprowski }
219118177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
21924ce63fcdSMarek Szyprowski
2193eab8d653SLaurent Pinchart static int __arm_iommu_attach_device(struct device *dev,
21944ce63fcdSMarek Szyprowski                                      struct dma_iommu_mapping *mapping)
21954ce63fcdSMarek Szyprowski {
21964ce63fcdSMarek Szyprowski         int err;
21974ce63fcdSMarek Szyprowski
21984ce63fcdSMarek Szyprowski         err = iommu_attach_device(mapping->domain, dev);
21994ce63fcdSMarek Szyprowski         if (err)
22004ce63fcdSMarek Szyprowski                 return err;
22014ce63fcdSMarek Szyprowski
22024ce63fcdSMarek Szyprowski         kref_get(&mapping->kref);
220389cfdb19SWill Deacon         to_dma_iommu_mapping(dev) = mapping;
22044ce63fcdSMarek Szyprowski
220575c59716SHiroshi Doyu         pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
22064ce63fcdSMarek Szyprowski         return 0;
22074ce63fcdSMarek Szyprowski }
22084ce63fcdSMarek Szyprowski
22096fe36758SHiroshi Doyu /**
2210eab8d653SLaurent Pinchart  * arm_iommu_attach_device
22116fe36758SHiroshi Doyu  * @dev: valid struct device pointer
2212eab8d653SLaurent Pinchart  * @mapping: io address space mapping structure (returned from
2213eab8d653SLaurent Pinchart  *	arm_iommu_create_mapping)
22146fe36758SHiroshi Doyu  *
2215eab8d653SLaurent Pinchart  * Attaches the specified io address space mapping to the provided device.
2216eab8d653SLaurent Pinchart  * This replaces the dma operations (dma_map_ops pointer) with the
2217eab8d653SLaurent Pinchart  * IOMMU aware version.
2218eab8d653SLaurent Pinchart  *
2219eab8d653SLaurent Pinchart  * More than one client might be attached to the same io address space
2220eab8d653SLaurent Pinchart  * mapping.
22216fe36758SHiroshi Doyu  */
2222eab8d653SLaurent Pinchart int arm_iommu_attach_device(struct device *dev,
2223eab8d653SLaurent Pinchart                             struct dma_iommu_mapping *mapping)
2224eab8d653SLaurent Pinchart {
2225eab8d653SLaurent Pinchart         int err;
2226eab8d653SLaurent Pinchart
2227eab8d653SLaurent Pinchart         err = __arm_iommu_attach_device(dev, mapping);
2228eab8d653SLaurent Pinchart         if (err)
2229eab8d653SLaurent Pinchart                 return err;
2230eab8d653SLaurent Pinchart
2231eab8d653SLaurent Pinchart         set_dma_ops(dev, &iommu_ops);
2232eab8d653SLaurent Pinchart         return 0;
2233eab8d653SLaurent Pinchart }
2234eab8d653SLaurent Pinchart EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
2235eab8d653SLaurent Pinchart
2236d3e01c51SSricharan R /**
2237d3e01c51SSricharan R  * arm_iommu_detach_device
2238d3e01c51SSricharan R  * @dev: valid struct device pointer
2239d3e01c51SSricharan R  *
2240d3e01c51SSricharan R  * Detaches the provided device from a previously attached map.
22414a4d68fcSWolfram Sang (Renesas)  * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
2242d3e01c51SSricharan R  */
2243d3e01c51SSricharan R void arm_iommu_detach_device(struct device *dev)
22446fe36758SHiroshi Doyu {
22456fe36758SHiroshi Doyu         struct dma_iommu_mapping *mapping;
22466fe36758SHiroshi Doyu
22476fe36758SHiroshi Doyu         mapping = to_dma_iommu_mapping(dev);
22486fe36758SHiroshi Doyu         if (!mapping) {
22496fe36758SHiroshi Doyu                 dev_warn(dev, "Not attached\n");
22506fe36758SHiroshi Doyu                 return;
22516fe36758SHiroshi Doyu         }
22526fe36758SHiroshi Doyu
22536fe36758SHiroshi Doyu         iommu_detach_device(mapping->domain, dev);
22546fe36758SHiroshi Doyu         kref_put(&mapping->kref, release_iommu_mapping);
225589cfdb19SWill Deacon         to_dma_iommu_mapping(dev) = NULL;
22561874619aSThierry Reding         set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
22576fe36758SHiroshi Doyu
22586fe36758SHiroshi Doyu         pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
22596fe36758SHiroshi Doyu }
226018177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
22616fe36758SHiroshi Doyu
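/*
 * Editor's sketch (illustrative only, not part of the original file): the
 * usual lifecycle for a driver that manages its own IOMMU mapping with the
 * functions above.  The foo_probe()/foo_remove() names and the 256 MiB
 * window at 0x10000000 are made up for the example.
 *
 *	static struct dma_iommu_mapping *foo_mapping;
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		foo_mapping = arm_iommu_create_mapping(&platform_bus_type,
 *						       0x10000000, SZ_256M);
 *		if (IS_ERR(foo_mapping))
 *			return PTR_ERR(foo_mapping);
 *
 *		ret = arm_iommu_attach_device(&pdev->dev, foo_mapping);
 *		if (ret) {
 *			arm_iommu_release_mapping(foo_mapping);
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		arm_iommu_detach_device(&pdev->dev);
 *		arm_iommu_release_mapping(foo_mapping);
 *		return 0;
 *	}
 */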
22625299709dSBart Van Assche static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
22634bb25789SWill Deacon {
22644bb25789SWill Deacon         return coherent ? &iommu_coherent_ops : &iommu_ops;
22654bb25789SWill Deacon }
22664bb25789SWill Deacon
22674bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
226853c92d79SRobin Murphy                                     const struct iommu_ops *iommu)
22694bb25789SWill Deacon {
22704bb25789SWill Deacon         struct dma_iommu_mapping *mapping;
22714bb25789SWill Deacon
22724bb25789SWill Deacon         if (!iommu)
22734bb25789SWill Deacon                 return false;
22744bb25789SWill Deacon
22754bb25789SWill Deacon         mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
22764bb25789SWill Deacon         if (IS_ERR(mapping)) {
22774bb25789SWill Deacon                 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
22784bb25789SWill Deacon                         size, dev_name(dev));
22794bb25789SWill Deacon                 return false;
22804bb25789SWill Deacon         }
22814bb25789SWill Deacon
2282eab8d653SLaurent Pinchart         if (__arm_iommu_attach_device(dev, mapping)) {
22834bb25789SWill Deacon                 pr_warn("Failed to attach device %s to IOMMU mapping\n",
22844bb25789SWill Deacon                         dev_name(dev));
22854bb25789SWill Deacon                 arm_iommu_release_mapping(mapping);
22864bb25789SWill Deacon                 return false;
22874bb25789SWill Deacon         }
22884bb25789SWill Deacon
22894bb25789SWill Deacon         return true;
22904bb25789SWill Deacon }
22914bb25789SWill Deacon
22924bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev)
22934bb25789SWill Deacon {
229489cfdb19SWill Deacon         struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
22954bb25789SWill Deacon
2296c2273a18SWill Deacon         if (!mapping)
2297c2273a18SWill Deacon                 return;
2298c2273a18SWill Deacon
2299d3e01c51SSricharan R         arm_iommu_detach_device(dev);
23004bb25789SWill Deacon         arm_iommu_release_mapping(mapping);
23014bb25789SWill Deacon }
23024bb25789SWill Deacon
23034bb25789SWill Deacon #else
23044bb25789SWill Deacon
23054bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
230653c92d79SRobin Murphy                                     const struct iommu_ops *iommu)
23074bb25789SWill Deacon {
23084bb25789SWill Deacon         return false;
23094bb25789SWill Deacon }
23104bb25789SWill Deacon
23114bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { }
23124bb25789SWill Deacon
23134bb25789SWill Deacon #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
23144bb25789SWill Deacon
23154bb25789SWill Deacon #endif /* CONFIG_ARM_DMA_USE_IOMMU */
23164bb25789SWill Deacon
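/*
 * Editor's note (sketch, not part of the original file): arch_setup_dma_ops()
 * below is normally invoked by the generic firmware glue rather than by
 * drivers; for example, the OF code derives dma_base/size from "dma-ranges"
 * and coherency from the "dma-coherent" property, then calls roughly:
 *
 *	arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);
 *
 * With an IOMMU present this ends up in arm_setup_iommu_dma_ops() above;
 * otherwise the plain arm_get_dma_map_ops() table is installed.
 */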
23174bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
231853c92d79SRobin Murphy                         const struct iommu_ops *iommu, bool coherent)
23194bb25789SWill Deacon {
23205299709dSBart Van Assche         const struct dma_map_ops *dma_ops;
23214bb25789SWill Deacon
23226f51ee70SLinus Torvalds         dev->archdata.dma_coherent = coherent;
2323ad3c7b18SChristoph Hellwig #ifdef CONFIG_SWIOTLB
2324ad3c7b18SChristoph Hellwig         dev->dma_coherent = coherent;
2325ad3c7b18SChristoph Hellwig #endif
232626b37b94SLaurent Pinchart
232726b37b94SLaurent Pinchart         /*
232826b37b94SLaurent Pinchart          * Don't override the dma_ops if they have already been set. Ideally
232926b37b94SLaurent Pinchart          * this should be the only location where dma_ops are set, remove this
233026b37b94SLaurent Pinchart          * check when all other callers of set_dma_ops will have disappeared.
233126b37b94SLaurent Pinchart          */
233226b37b94SLaurent Pinchart         if (dev->dma_ops)
233326b37b94SLaurent Pinchart                 return;
233426b37b94SLaurent Pinchart
23354bb25789SWill Deacon         if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
23364bb25789SWill Deacon                 dma_ops = arm_get_iommu_dma_map_ops(coherent);
23374bb25789SWill Deacon         else
23384bb25789SWill Deacon                 dma_ops = arm_get_dma_map_ops(coherent);
23394bb25789SWill Deacon
23404bb25789SWill Deacon         set_dma_ops(dev, dma_ops);
2341e0586326SStefano Stabellini
2342e0586326SStefano Stabellini #ifdef CONFIG_XEN
2343e0586326SStefano Stabellini         if (xen_initial_domain()) {
2344e0586326SStefano Stabellini                 dev->archdata.dev_dma_ops = dev->dma_ops;
2345e0586326SStefano Stabellini                 dev->dma_ops = xen_dma_ops;
2346e0586326SStefano Stabellini         }
2347e0586326SStefano Stabellini #endif
2348a93a121aSLaurent Pinchart         dev->archdata.dma_ops_setup = true;
23494bb25789SWill Deacon }
23504bb25789SWill Deacon
23514bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev)
23524bb25789SWill Deacon {
2353a93a121aSLaurent Pinchart         if (!dev->archdata.dma_ops_setup)
2354a93a121aSLaurent Pinchart                 return;
2355a93a121aSLaurent Pinchart
23564bb25789SWill Deacon         arm_teardown_iommu_dma_ops(dev);
2357fc67e6f1SRobin Murphy         /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2358fc67e6f1SRobin Murphy         set_dma_ops(dev, NULL);
23594bb25789SWill Deacon }
2360ad3c7b18SChristoph Hellwig
2361ad3c7b18SChristoph Hellwig #ifdef CONFIG_SWIOTLB
2362ad3c7b18SChristoph Hellwig void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
2363ad3c7b18SChristoph Hellwig                 size_t size, enum dma_data_direction dir)
2364ad3c7b18SChristoph Hellwig {
2365ad3c7b18SChristoph Hellwig         __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2366ad3c7b18SChristoph Hellwig                               size, dir);
2367ad3c7b18SChristoph Hellwig }
2368ad3c7b18SChristoph Hellwig
2369ad3c7b18SChristoph Hellwig void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
2370ad3c7b18SChristoph Hellwig                 size_t size, enum dma_data_direction dir)
2371ad3c7b18SChristoph Hellwig {
2372ad3c7b18SChristoph Hellwig         __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2373ad3c7b18SChristoph Hellwig                               size, dir);
2374ad3c7b18SChristoph Hellwig }
2375ad3c7b18SChristoph Hellwig
2376ad3c7b18SChristoph Hellwig long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
2377ad3c7b18SChristoph Hellwig                 dma_addr_t dma_addr)
2378ad3c7b18SChristoph Hellwig {
2379ad3c7b18SChristoph Hellwig         return dma_to_pfn(dev, dma_addr);
2380ad3c7b18SChristoph Hellwig }
2381ad3c7b18SChristoph Hellwig
2382ad3c7b18SChristoph Hellwig void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
2383ad3c7b18SChristoph Hellwig                 gfp_t gfp, unsigned long attrs)
2384ad3c7b18SChristoph Hellwig {
2385ad3c7b18SChristoph Hellwig         return __dma_alloc(dev, size, dma_handle, gfp,
2386ad3c7b18SChristoph Hellwig                            __get_dma_pgprot(attrs, PAGE_KERNEL), false,
2387ad3c7b18SChristoph Hellwig                            attrs, __builtin_return_address(0));
2388ad3c7b18SChristoph Hellwig }
2389ad3c7b18SChristoph Hellwig
2390ad3c7b18SChristoph Hellwig void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
2391ad3c7b18SChristoph Hellwig                 dma_addr_t dma_handle, unsigned long attrs)
2392ad3c7b18SChristoph Hellwig {
2393ad3c7b18SChristoph Hellwig         __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
2394ad3c7b18SChristoph Hellwig }
2395ad3c7b18SChristoph Hellwig #endif /* CONFIG_SWIOTLB */
2396
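/*
 * Editor's sketch (illustrative only): on a SWIOTLB configuration the
 * arch_dma_alloc()/arch_dma_free() hooks above back the generic coherent
 * allocation API for non-coherent devices, so a driver simply does:
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... share "buf" (CPU view) and "dma" (device view) with the device ...
 *	dma_free_coherent(dev, SZ_4K, buf, dma);
 */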