1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 20ddbccd1SRussell King /* 30ddbccd1SRussell King * linux/arch/arm/mm/dma-mapping.c 40ddbccd1SRussell King * 50ddbccd1SRussell King * Copyright (C) 2000-2004 Russell King 60ddbccd1SRussell King * 70ddbccd1SRussell King * DMA uncached mapping support. 80ddbccd1SRussell King */ 90ddbccd1SRussell King #include <linux/module.h> 100ddbccd1SRussell King #include <linux/mm.h> 1136d0fd21SLaura Abbott #include <linux/genalloc.h> 125a0e3ad6STejun Heo #include <linux/gfp.h> 130ddbccd1SRussell King #include <linux/errno.h> 140ddbccd1SRussell King #include <linux/list.h> 150ddbccd1SRussell King #include <linux/init.h> 160ddbccd1SRussell King #include <linux/device.h> 17249baa54SChristoph Hellwig #include <linux/dma-direct.h> 180ddbccd1SRussell King #include <linux/dma-mapping.h> 19ad3c7b18SChristoph Hellwig #include <linux/dma-noncoherent.h> 20c7909509SMarek Szyprowski #include <linux/dma-contiguous.h> 2139af22a7SNicolas Pitre #include <linux/highmem.h> 22c7909509SMarek Szyprowski #include <linux/memblock.h> 2399d1717dSJon Medhurst #include <linux/slab.h> 244ce63fcdSMarek Szyprowski #include <linux/iommu.h> 25e9da6e99SMarek Szyprowski #include <linux/io.h> 264ce63fcdSMarek Szyprowski #include <linux/vmalloc.h> 27158e8bfeSAlessandro Rubini #include <linux/sizes.h> 28a254129eSJoonsoo Kim #include <linux/cma.h> 290ddbccd1SRussell King 300ddbccd1SRussell King #include <asm/memory.h> 3143377453SNicolas Pitre #include <asm/highmem.h> 320ddbccd1SRussell King #include <asm/cacheflush.h> 330ddbccd1SRussell King #include <asm/tlbflush.h> 3499d1717dSJon Medhurst #include <asm/mach/arch.h> 354ce63fcdSMarek Szyprowski #include <asm/dma-iommu.h> 36c7909509SMarek Szyprowski #include <asm/mach/map.h> 37c7909509SMarek Szyprowski #include <asm/system_info.h> 38c7909509SMarek Szyprowski #include <asm/dma-contiguous.h> 390e0d26e7SChristoph Hellwig #include <xen/swiotlb-xen.h> 400ddbccd1SRussell King 411234e3fdSRussell King #include "dma.h" 42022ae537SRussell King #include "mm.h" 43022ae537SRussell King 44b4268676SRabin Vincent struct arm_dma_alloc_args { 45b4268676SRabin Vincent struct device *dev; 46b4268676SRabin Vincent size_t size; 47b4268676SRabin Vincent gfp_t gfp; 48b4268676SRabin Vincent pgprot_t prot; 49b4268676SRabin Vincent const void *caller; 50b4268676SRabin Vincent bool want_vaddr; 51f1270896SGregory CLEMENT int coherent_flag; 52b4268676SRabin Vincent }; 53b4268676SRabin Vincent 54b4268676SRabin Vincent struct arm_dma_free_args { 55b4268676SRabin Vincent struct device *dev; 56b4268676SRabin Vincent size_t size; 57b4268676SRabin Vincent void *cpu_addr; 58b4268676SRabin Vincent struct page *page; 59b4268676SRabin Vincent bool want_vaddr; 60b4268676SRabin Vincent }; 61b4268676SRabin Vincent 62f1270896SGregory CLEMENT #define NORMAL 0 63f1270896SGregory CLEMENT #define COHERENT 1 64f1270896SGregory CLEMENT 65b4268676SRabin Vincent struct arm_dma_allocator { 66b4268676SRabin Vincent void *(*alloc)(struct arm_dma_alloc_args *args, 67b4268676SRabin Vincent struct page **ret_page); 68b4268676SRabin Vincent void (*free)(struct arm_dma_free_args *args); 69b4268676SRabin Vincent }; 70b4268676SRabin Vincent 7119e6e5e5SRabin Vincent struct arm_dma_buffer { 7219e6e5e5SRabin Vincent struct list_head list; 7319e6e5e5SRabin Vincent void *virt; 74b4268676SRabin Vincent struct arm_dma_allocator *allocator; 7519e6e5e5SRabin Vincent }; 7619e6e5e5SRabin Vincent 7719e6e5e5SRabin Vincent static LIST_HEAD(arm_dma_bufs); 7819e6e5e5SRabin Vincent static 
DEFINE_SPINLOCK(arm_dma_bufs_lock); 7919e6e5e5SRabin Vincent 8019e6e5e5SRabin Vincent static struct arm_dma_buffer *arm_dma_buffer_find(void *virt) 8119e6e5e5SRabin Vincent { 8219e6e5e5SRabin Vincent struct arm_dma_buffer *buf, *found = NULL; 8319e6e5e5SRabin Vincent unsigned long flags; 8419e6e5e5SRabin Vincent 8519e6e5e5SRabin Vincent spin_lock_irqsave(&arm_dma_bufs_lock, flags); 8619e6e5e5SRabin Vincent list_for_each_entry(buf, &arm_dma_bufs, list) { 8719e6e5e5SRabin Vincent if (buf->virt == virt) { 8819e6e5e5SRabin Vincent list_del(&buf->list); 8919e6e5e5SRabin Vincent found = buf; 9019e6e5e5SRabin Vincent break; 9119e6e5e5SRabin Vincent } 9219e6e5e5SRabin Vincent } 9319e6e5e5SRabin Vincent spin_unlock_irqrestore(&arm_dma_bufs_lock, flags); 9419e6e5e5SRabin Vincent return found; 9519e6e5e5SRabin Vincent } 9619e6e5e5SRabin Vincent 9715237e1fSMarek Szyprowski /* 9815237e1fSMarek Szyprowski * The DMA API is built upon the notion of "buffer ownership". A buffer 9915237e1fSMarek Szyprowski * is either exclusively owned by the CPU (and therefore may be accessed 10015237e1fSMarek Szyprowski * by it) or exclusively owned by the DMA device. These helper functions 10115237e1fSMarek Szyprowski * represent the transitions between these two ownership states. 10215237e1fSMarek Szyprowski * 10315237e1fSMarek Szyprowski * Note, however, that on later ARMs, this notion does not work due to 10415237e1fSMarek Szyprowski * speculative prefetches. We model our approach on the assumption that 10515237e1fSMarek Szyprowski * the CPU does do speculative prefetches, which means we clean caches 10615237e1fSMarek Szyprowski * before transfers and delay cache invalidation until transfer completion. 10715237e1fSMarek Szyprowski * 10815237e1fSMarek Szyprowski */ 10951fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *, unsigned long, 11015237e1fSMarek Szyprowski size_t, enum dma_data_direction); 11151fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *, unsigned long, 11215237e1fSMarek Szyprowski size_t, enum dma_data_direction); 11315237e1fSMarek Szyprowski 1142dc6a016SMarek Szyprowski /** 1152dc6a016SMarek Szyprowski * arm_dma_map_page - map a portion of a page for streaming DMA 1162dc6a016SMarek Szyprowski * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1172dc6a016SMarek Szyprowski * @page: page that buffer resides in 1182dc6a016SMarek Szyprowski * @offset: offset into page for start of buffer 1192dc6a016SMarek Szyprowski * @size: size of buffer to map 1202dc6a016SMarek Szyprowski * @dir: DMA transfer direction 1212dc6a016SMarek Szyprowski * 1222dc6a016SMarek Szyprowski * Ensure that any data held in the cache is appropriately discarded 1232dc6a016SMarek Szyprowski * or written back. 1242dc6a016SMarek Szyprowski * 1252dc6a016SMarek Szyprowski * The device owns this memory once this call has completed. The CPU 1262dc6a016SMarek Szyprowski * can regain ownership by calling dma_unmap_page(). 
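 *
 * A minimal sketch of the usual calling pattern from a driver, via the
 * generic dma_map_page()/dma_unmap_page() wrappers that end up here for
 * devices using arm_dma_ops (page, size and direction below are purely
 * illustrative):
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hand 'dma' to the device and start the transfer ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);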
1272dc6a016SMarek Szyprowski */ 12851fde349SMarek Szyprowski static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, 1292dc6a016SMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 13000085f1eSKrzysztof Kozlowski unsigned long attrs) 1312dc6a016SMarek Szyprowski { 13200085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 13351fde349SMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 13451fde349SMarek Szyprowski return pfn_to_dma(dev, page_to_pfn(page)) + offset; 1352dc6a016SMarek Szyprowski } 1362dc6a016SMarek Szyprowski 137dd37e940SRob Herring static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, 138dd37e940SRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 13900085f1eSKrzysztof Kozlowski unsigned long attrs) 140dd37e940SRob Herring { 141dd37e940SRob Herring return pfn_to_dma(dev, page_to_pfn(page)) + offset; 142dd37e940SRob Herring } 143dd37e940SRob Herring 1442dc6a016SMarek Szyprowski /** 1452dc6a016SMarek Szyprowski * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() 1462dc6a016SMarek Szyprowski * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1472dc6a016SMarek Szyprowski * @handle: DMA address of buffer 1482dc6a016SMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 1492dc6a016SMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 1502dc6a016SMarek Szyprowski * 1512dc6a016SMarek Szyprowski * Unmap a page streaming mode DMA translation. The handle and size 1522dc6a016SMarek Szyprowski * must match what was provided in the previous dma_map_page() call. 1532dc6a016SMarek Szyprowski * All other usages are undefined. 1542dc6a016SMarek Szyprowski * 1552dc6a016SMarek Szyprowski * After this call, reads by the CPU to the buffer are guaranteed to see 1562dc6a016SMarek Szyprowski * whatever the device wrote there. 
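 *
 * If the CPU must look at the data while the streaming mapping is still
 * live, the usual pattern (sketched with illustrative arguments) is to
 * bracket the CPU access with the sync helpers implemented below:
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);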
1572dc6a016SMarek Szyprowski */ 15851fde349SMarek Szyprowski static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, 15900085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 1602dc6a016SMarek Szyprowski { 16100085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 16251fde349SMarek Szyprowski __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), 16351fde349SMarek Szyprowski handle & ~PAGE_MASK, size, dir); 1642dc6a016SMarek Szyprowski } 1652dc6a016SMarek Szyprowski 16651fde349SMarek Szyprowski static void arm_dma_sync_single_for_cpu(struct device *dev, 1672dc6a016SMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 1682dc6a016SMarek Szyprowski { 1692dc6a016SMarek Szyprowski unsigned int offset = handle & (PAGE_SIZE - 1); 1702dc6a016SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); 1712dc6a016SMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 1722dc6a016SMarek Szyprowski } 1732dc6a016SMarek Szyprowski 17451fde349SMarek Szyprowski static void arm_dma_sync_single_for_device(struct device *dev, 1752dc6a016SMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 1762dc6a016SMarek Szyprowski { 1772dc6a016SMarek Szyprowski unsigned int offset = handle & (PAGE_SIZE - 1); 1782dc6a016SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); 1792dc6a016SMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 1802dc6a016SMarek Szyprowski } 1812dc6a016SMarek Szyprowski 1825299709dSBart Van Assche const struct dma_map_ops arm_dma_ops = { 183f99d6034SMarek Szyprowski .alloc = arm_dma_alloc, 184f99d6034SMarek Szyprowski .free = arm_dma_free, 185f99d6034SMarek Szyprowski .mmap = arm_dma_mmap, 186dc2832e1SMarek Szyprowski .get_sgtable = arm_dma_get_sgtable, 1872dc6a016SMarek Szyprowski .map_page = arm_dma_map_page, 1882dc6a016SMarek Szyprowski .unmap_page = arm_dma_unmap_page, 1892dc6a016SMarek Szyprowski .map_sg = arm_dma_map_sg, 1902dc6a016SMarek Szyprowski .unmap_sg = arm_dma_unmap_sg, 191cfced786SChristoph Hellwig .map_resource = dma_direct_map_resource, 1922dc6a016SMarek Szyprowski .sync_single_for_cpu = arm_dma_sync_single_for_cpu, 1932dc6a016SMarek Szyprowski .sync_single_for_device = arm_dma_sync_single_for_device, 1942dc6a016SMarek Szyprowski .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, 1952dc6a016SMarek Szyprowski .sync_sg_for_device = arm_dma_sync_sg_for_device, 196418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 197249baa54SChristoph Hellwig .get_required_mask = dma_direct_get_required_mask, 1982dc6a016SMarek Szyprowski }; 1992dc6a016SMarek Szyprowski EXPORT_SYMBOL(arm_dma_ops); 2002dc6a016SMarek Szyprowski 201dd37e940SRob Herring static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 20200085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs); 203dd37e940SRob Herring static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 20400085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs); 20555af8a91SMike Looijmans static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, 20655af8a91SMike Looijmans void *cpu_addr, dma_addr_t dma_addr, size_t size, 20700085f1eSKrzysztof Kozlowski unsigned long attrs); 208dd37e940SRob Herring 2095299709dSBart Van Assche const struct dma_map_ops arm_coherent_dma_ops = { 210dd37e940SRob Herring .alloc = arm_coherent_dma_alloc, 211dd37e940SRob Herring .free = 
arm_coherent_dma_free, 21255af8a91SMike Looijmans .mmap = arm_coherent_dma_mmap, 213dd37e940SRob Herring .get_sgtable = arm_dma_get_sgtable, 214dd37e940SRob Herring .map_page = arm_coherent_dma_map_page, 215dd37e940SRob Herring .map_sg = arm_dma_map_sg, 216cfced786SChristoph Hellwig .map_resource = dma_direct_map_resource, 217418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 218249baa54SChristoph Hellwig .get_required_mask = dma_direct_get_required_mask, 219dd37e940SRob Herring }; 220dd37e940SRob Herring EXPORT_SYMBOL(arm_coherent_dma_ops); 221dd37e940SRob Herring 2229f28cde0SRussell King static int __dma_supported(struct device *dev, u64 mask, bool warn) 2239f28cde0SRussell King { 224ab746573SChristoph Hellwig unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); 2259f28cde0SRussell King 2269f28cde0SRussell King /* 2279f28cde0SRussell King * Translate the device's DMA mask to a PFN limit. This 2289f28cde0SRussell King * PFN number includes the page which we can DMA to. 2299f28cde0SRussell King */ 2309f28cde0SRussell King if (dma_to_pfn(dev, mask) < max_dma_pfn) { 2319f28cde0SRussell King if (warn) 2329f28cde0SRussell King dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", 2339f28cde0SRussell King mask, 2349f28cde0SRussell King dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, 2359f28cde0SRussell King max_dma_pfn + 1); 2369f28cde0SRussell King return 0; 2379f28cde0SRussell King } 2389f28cde0SRussell King 2399f28cde0SRussell King return 1; 2409f28cde0SRussell King } 2419f28cde0SRussell King 242ab6494f0SCatalin Marinas static u64 get_coherent_dma_mask(struct device *dev) 243ab6494f0SCatalin Marinas { 2444dcfa600SRussell King u64 mask = (u64)DMA_BIT_MASK(32); 2450ddbccd1SRussell King 246ab6494f0SCatalin Marinas if (dev) { 247ab6494f0SCatalin Marinas mask = dev->coherent_dma_mask; 248ab6494f0SCatalin Marinas 249ab6494f0SCatalin Marinas /* 250ab6494f0SCatalin Marinas * Sanity check the DMA mask - it must be non-zero, and 251ab6494f0SCatalin Marinas * must be able to be satisfied by a DMA allocation. 252ab6494f0SCatalin Marinas */ 253ab6494f0SCatalin Marinas if (mask == 0) { 254ab6494f0SCatalin Marinas dev_warn(dev, "coherent DMA mask is unset\n"); 255ab6494f0SCatalin Marinas return 0; 256ab6494f0SCatalin Marinas } 257ab6494f0SCatalin Marinas 2589f28cde0SRussell King if (!__dma_supported(dev, mask, true)) 2594dcfa600SRussell King return 0; 2604dcfa600SRussell King } 2614dcfa600SRussell King 262ab6494f0SCatalin Marinas return mask; 263ab6494f0SCatalin Marinas } 264ab6494f0SCatalin Marinas 265f1270896SGregory CLEMENT static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) 266c7909509SMarek Szyprowski { 267c7909509SMarek Szyprowski /* 268c7909509SMarek Szyprowski * Ensure that the allocated pages are zeroed, and that any data 269c7909509SMarek Szyprowski * lurking in the kernel direct-mapped region is invalidated. 
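 * For COHERENT allocations the cache maintenance below is skipped and the
 * buffer is only zeroed, since a coherent device cannot observe stale
 * cache lines left behind by the memset.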
270c7909509SMarek Szyprowski */ 2719848e48fSMarek Szyprowski if (PageHighMem(page)) { 2729848e48fSMarek Szyprowski phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); 2739848e48fSMarek Szyprowski phys_addr_t end = base + size; 2749848e48fSMarek Szyprowski while (size > 0) { 2759848e48fSMarek Szyprowski void *ptr = kmap_atomic(page); 2769848e48fSMarek Szyprowski memset(ptr, 0, PAGE_SIZE); 277f1270896SGregory CLEMENT if (coherent_flag != COHERENT) 2789848e48fSMarek Szyprowski dmac_flush_range(ptr, ptr + PAGE_SIZE); 2799848e48fSMarek Szyprowski kunmap_atomic(ptr); 2809848e48fSMarek Szyprowski page++; 2819848e48fSMarek Szyprowski size -= PAGE_SIZE; 2829848e48fSMarek Szyprowski } 283f1270896SGregory CLEMENT if (coherent_flag != COHERENT) 2849848e48fSMarek Szyprowski outer_flush_range(base, end); 2859848e48fSMarek Szyprowski } else { 2869848e48fSMarek Szyprowski void *ptr = page_address(page); 287c7909509SMarek Szyprowski memset(ptr, 0, size); 288f1270896SGregory CLEMENT if (coherent_flag != COHERENT) { 289c7909509SMarek Szyprowski dmac_flush_range(ptr, ptr + size); 290c7909509SMarek Szyprowski outer_flush_range(__pa(ptr), __pa(ptr) + size); 291c7909509SMarek Szyprowski } 2924ce63fcdSMarek Szyprowski } 293f1270896SGregory CLEMENT } 294c7909509SMarek Szyprowski 2957a9a32a9SRussell King /* 2967a9a32a9SRussell King * Allocate a DMA buffer for 'dev' of size 'size' using the 2977a9a32a9SRussell King * specified gfp mask. Note that 'size' must be page aligned. 2987a9a32a9SRussell King */ 299f1270896SGregory CLEMENT static struct page *__dma_alloc_buffer(struct device *dev, size_t size, 300f1270896SGregory CLEMENT gfp_t gfp, int coherent_flag) 3017a9a32a9SRussell King { 3027a9a32a9SRussell King unsigned long order = get_order(size); 3037a9a32a9SRussell King struct page *page, *p, *e; 3047a9a32a9SRussell King 3057a9a32a9SRussell King page = alloc_pages(gfp, order); 3067a9a32a9SRussell King if (!page) 3077a9a32a9SRussell King return NULL; 3087a9a32a9SRussell King 3097a9a32a9SRussell King /* 3107a9a32a9SRussell King * Now split the huge page and free the excess pages 3117a9a32a9SRussell King */ 3127a9a32a9SRussell King split_page(page, order); 3137a9a32a9SRussell King for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) 3147a9a32a9SRussell King __free_page(p); 3157a9a32a9SRussell King 316f1270896SGregory CLEMENT __dma_clear_buffer(page, size, coherent_flag); 3177a9a32a9SRussell King 3187a9a32a9SRussell King return page; 3197a9a32a9SRussell King } 3207a9a32a9SRussell King 3217a9a32a9SRussell King /* 3227a9a32a9SRussell King * Free a DMA buffer. 'size' must be page aligned. 
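 * The buffer was split into individual order-0 pages by
 * __dma_alloc_buffer(), so it is released one page at a time here.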
3237a9a32a9SRussell King */ 3247a9a32a9SRussell King static void __dma_free_buffer(struct page *page, size_t size) 3257a9a32a9SRussell King { 3267a9a32a9SRussell King struct page *e = page + (size >> PAGE_SHIFT); 3277a9a32a9SRussell King 3287a9a32a9SRussell King while (page < e) { 3297a9a32a9SRussell King __free_page(page); 3307a9a32a9SRussell King page++; 3317a9a32a9SRussell King } 3327a9a32a9SRussell King } 3337a9a32a9SRussell King 334c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size, 3359848e48fSMarek Szyprowski pgprot_t prot, struct page **ret_page, 336f1270896SGregory CLEMENT const void *caller, bool want_vaddr, 337712c604dSLucas Stach int coherent_flag, gfp_t gfp); 338c7909509SMarek Szyprowski 339e9da6e99SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 340e9da6e99SMarek Szyprowski pgprot_t prot, struct page **ret_page, 3416e8266e3SCarlo Caione const void *caller, bool want_vaddr); 342e9da6e99SMarek Szyprowski 3436e5267aaSMarek Szyprowski #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K 344b337e1c4SVladimir Murzin static struct gen_pool *atomic_pool __ro_after_init; 3456e5267aaSMarek Szyprowski 346b337e1c4SVladimir Murzin static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE; 347c7909509SMarek Szyprowski 348c7909509SMarek Szyprowski static int __init early_coherent_pool(char *p) 349c7909509SMarek Szyprowski { 35036d0fd21SLaura Abbott atomic_pool_size = memparse(p, &p); 351c7909509SMarek Szyprowski return 0; 352c7909509SMarek Szyprowski } 353c7909509SMarek Szyprowski early_param("coherent_pool", early_coherent_pool); 354c7909509SMarek Szyprowski 355c7909509SMarek Szyprowski /* 356c7909509SMarek Szyprowski * Initialise the coherent pool for atomic allocations. 357c7909509SMarek Szyprowski */ 358e9da6e99SMarek Szyprowski static int __init atomic_pool_init(void) 359c7909509SMarek Szyprowski { 36071b55663SRussell King pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL); 3619d1400cfSMarek Szyprowski gfp_t gfp = GFP_KERNEL | GFP_DMA; 362c7909509SMarek Szyprowski struct page *page; 363c7909509SMarek Szyprowski void *ptr; 364c7909509SMarek Szyprowski 36536d0fd21SLaura Abbott atomic_pool = gen_pool_create(PAGE_SHIFT, -1); 36636d0fd21SLaura Abbott if (!atomic_pool) 36736d0fd21SLaura Abbott goto out; 368f1270896SGregory CLEMENT /* 369f1270896SGregory CLEMENT * The atomic pool is only used for non-coherent allocations 370f1270896SGregory CLEMENT * so we must pass NORMAL for coherent_flag. 
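 * Passing NORMAL makes __dma_clear_buffer() flush the pool's pages out
 * of the cacheable direct mapping before they are handed out, exactly
 * as for any other non-coherent buffer.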
371f1270896SGregory CLEMENT */ 372e464ef16SGioh Kim if (dev_get_cma_area(NULL)) 37336d0fd21SLaura Abbott ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot, 374712c604dSLucas Stach &page, atomic_pool_init, true, NORMAL, 375712c604dSLucas Stach GFP_KERNEL); 376e9da6e99SMarek Szyprowski else 37736d0fd21SLaura Abbott ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot, 3786e8266e3SCarlo Caione &page, atomic_pool_init, true); 379c7909509SMarek Szyprowski if (ptr) { 38036d0fd21SLaura Abbott int ret; 3816b3fe472SHiroshi Doyu 38236d0fd21SLaura Abbott ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr, 38336d0fd21SLaura Abbott page_to_phys(page), 38436d0fd21SLaura Abbott atomic_pool_size, -1); 38536d0fd21SLaura Abbott if (ret) 38636d0fd21SLaura Abbott goto destroy_genpool; 3876b3fe472SHiroshi Doyu 38836d0fd21SLaura Abbott gen_pool_set_algo(atomic_pool, 38936d0fd21SLaura Abbott gen_pool_first_fit_order_align, 390acb62448SVladimir Murzin NULL); 391bf31c5e0SFabio Estevam pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n", 39236d0fd21SLaura Abbott atomic_pool_size / 1024); 393c7909509SMarek Szyprowski return 0; 394c7909509SMarek Szyprowski } 395ec10665cSSachin Kamat 39636d0fd21SLaura Abbott destroy_genpool: 39736d0fd21SLaura Abbott gen_pool_destroy(atomic_pool); 39836d0fd21SLaura Abbott atomic_pool = NULL; 39936d0fd21SLaura Abbott out: 400bf31c5e0SFabio Estevam pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", 40136d0fd21SLaura Abbott atomic_pool_size / 1024); 402c7909509SMarek Szyprowski return -ENOMEM; 403c7909509SMarek Szyprowski } 404c7909509SMarek Szyprowski /* 405c7909509SMarek Szyprowski * CMA is activated by core_initcall, so we must be called after it. 406c7909509SMarek Szyprowski */ 407e9da6e99SMarek Szyprowski postcore_initcall(atomic_pool_init); 408c7909509SMarek Szyprowski 409c7909509SMarek Szyprowski struct dma_contig_early_reserve { 410c7909509SMarek Szyprowski phys_addr_t base; 411c7909509SMarek Szyprowski unsigned long size; 412c7909509SMarek Szyprowski }; 413c7909509SMarek Szyprowski 414c7909509SMarek Szyprowski static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata; 415c7909509SMarek Szyprowski 416c7909509SMarek Szyprowski static int dma_mmu_remap_num __initdata; 417c7909509SMarek Szyprowski 418c7909509SMarek Szyprowski void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) 419c7909509SMarek Szyprowski { 420c7909509SMarek Szyprowski dma_mmu_remap[dma_mmu_remap_num].base = base; 421c7909509SMarek Szyprowski dma_mmu_remap[dma_mmu_remap_num].size = size; 422c7909509SMarek Szyprowski dma_mmu_remap_num++; 423c7909509SMarek Szyprowski } 424c7909509SMarek Szyprowski 425c7909509SMarek Szyprowski void __init dma_contiguous_remap(void) 426c7909509SMarek Szyprowski { 427c7909509SMarek Szyprowski int i; 428c7909509SMarek Szyprowski for (i = 0; i < dma_mmu_remap_num; i++) { 429c7909509SMarek Szyprowski phys_addr_t start = dma_mmu_remap[i].base; 430c7909509SMarek Szyprowski phys_addr_t end = start + dma_mmu_remap[i].size; 431c7909509SMarek Szyprowski struct map_desc map; 432c7909509SMarek Szyprowski unsigned long addr; 433c7909509SMarek Szyprowski 434c7909509SMarek Szyprowski if (end > arm_lowmem_limit) 435c7909509SMarek Szyprowski end = arm_lowmem_limit; 436c7909509SMarek Szyprowski if (start >= end) 43739f78e70SChris Brand continue; 438c7909509SMarek Szyprowski 439c7909509SMarek Szyprowski map.pfn = __phys_to_pfn(start); 440c7909509SMarek Szyprowski map.virtual = 
__phys_to_virt(start); 441c7909509SMarek Szyprowski map.length = end - start; 442c7909509SMarek Szyprowski map.type = MT_MEMORY_DMA_READY; 443c7909509SMarek Szyprowski 444c7909509SMarek Szyprowski /* 4456b076991SRussell King * Clear previous low-memory mapping to ensure that the 4466b076991SRussell King * TLB does not see any conflicting entries, then flush 4476b076991SRussell King * the TLB of the old entries before creating new mappings. 4486b076991SRussell King * 4496b076991SRussell King * This ensures that any speculatively loaded TLB entries 4506b076991SRussell King * (even though they may be rare) can not cause any problems, 4516b076991SRussell King * and ensures that this code is architecturally compliant. 452c7909509SMarek Szyprowski */ 453c7909509SMarek Szyprowski for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); 45461f6c7a4SVitaly Andrianov addr += PMD_SIZE) 455c7909509SMarek Szyprowski pmd_clear(pmd_off_k(addr)); 456c7909509SMarek Szyprowski 4576b076991SRussell King flush_tlb_kernel_range(__phys_to_virt(start), 4586b076991SRussell King __phys_to_virt(end)); 4596b076991SRussell King 460c7909509SMarek Szyprowski iotable_init(&map, 1); 461c7909509SMarek Szyprowski } 462c7909509SMarek Szyprowski } 463c7909509SMarek Szyprowski 4648b1e0f81SAnshuman Khandual static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data) 465c7909509SMarek Szyprowski { 466c7909509SMarek Szyprowski struct page *page = virt_to_page(addr); 467c7909509SMarek Szyprowski pgprot_t prot = *(pgprot_t *)data; 468c7909509SMarek Szyprowski 469c7909509SMarek Szyprowski set_pte_ext(pte, mk_pte(page, prot), 0); 470c7909509SMarek Szyprowski return 0; 471c7909509SMarek Szyprowski } 472c7909509SMarek Szyprowski 473c7909509SMarek Szyprowski static void __dma_remap(struct page *page, size_t size, pgprot_t prot) 474c7909509SMarek Szyprowski { 475c7909509SMarek Szyprowski unsigned long start = (unsigned long) page_address(page); 476c7909509SMarek Szyprowski unsigned end = start + size; 477c7909509SMarek Szyprowski 478c7909509SMarek Szyprowski apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); 479c7909509SMarek Szyprowski flush_tlb_kernel_range(start, end); 480c7909509SMarek Szyprowski } 481c7909509SMarek Szyprowski 482c7909509SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 483c7909509SMarek Szyprowski pgprot_t prot, struct page **ret_page, 4846e8266e3SCarlo Caione const void *caller, bool want_vaddr) 485c7909509SMarek Szyprowski { 486c7909509SMarek Szyprowski struct page *page; 4876e8266e3SCarlo Caione void *ptr = NULL; 488f1270896SGregory CLEMENT /* 489f1270896SGregory CLEMENT * __alloc_remap_buffer is only called when the device is 490f1270896SGregory CLEMENT * non-coherent 491f1270896SGregory CLEMENT */ 492f1270896SGregory CLEMENT page = __dma_alloc_buffer(dev, size, gfp, NORMAL); 493c7909509SMarek Szyprowski if (!page) 494c7909509SMarek Szyprowski return NULL; 4956e8266e3SCarlo Caione if (!want_vaddr) 4966e8266e3SCarlo Caione goto out; 497c7909509SMarek Szyprowski 49878406ff5SChristoph Hellwig ptr = dma_common_contiguous_remap(page, size, prot, caller); 499c7909509SMarek Szyprowski if (!ptr) { 500c7909509SMarek Szyprowski __dma_free_buffer(page, size); 501c7909509SMarek Szyprowski return NULL; 502c7909509SMarek Szyprowski } 503c7909509SMarek Szyprowski 5046e8266e3SCarlo Caione out: 505c7909509SMarek Szyprowski *ret_page = page; 506c7909509SMarek Szyprowski return ptr; 507c7909509SMarek Szyprowski } 508c7909509SMarek Szyprowski 
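/*
 * The helpers below satisfy non-blocking, non-coherent allocations (for
 * example dma_alloc_coherent() calls made with GFP_ATOMIC): instead of
 * allocating and remapping pages, they carve chunks out of the
 * pre-populated atomic_pool gen_pool set up in atomic_pool_init() above.
 */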
509e9da6e99SMarek Szyprowski static void *__alloc_from_pool(size_t size, struct page **ret_page) 510c7909509SMarek Szyprowski { 51136d0fd21SLaura Abbott unsigned long val; 512e9da6e99SMarek Szyprowski void *ptr = NULL; 513c7909509SMarek Szyprowski 51436d0fd21SLaura Abbott if (!atomic_pool) { 515e9da6e99SMarek Szyprowski WARN(1, "coherent pool not initialised!\n"); 516c7909509SMarek Szyprowski return NULL; 517c7909509SMarek Szyprowski } 518c7909509SMarek Szyprowski 51936d0fd21SLaura Abbott val = gen_pool_alloc(atomic_pool, size); 52036d0fd21SLaura Abbott if (val) { 52136d0fd21SLaura Abbott phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); 522e9da6e99SMarek Szyprowski 52336d0fd21SLaura Abbott *ret_page = phys_to_page(phys); 52436d0fd21SLaura Abbott ptr = (void *)val; 525e9da6e99SMarek Szyprowski } 526e9da6e99SMarek Szyprowski 527c7909509SMarek Szyprowski return ptr; 528c7909509SMarek Szyprowski } 529c7909509SMarek Szyprowski 53021d0a759SHiroshi Doyu static bool __in_atomic_pool(void *start, size_t size) 53121d0a759SHiroshi Doyu { 53236d0fd21SLaura Abbott return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); 53321d0a759SHiroshi Doyu } 53421d0a759SHiroshi Doyu 535e9da6e99SMarek Szyprowski static int __free_from_pool(void *start, size_t size) 536c7909509SMarek Szyprowski { 53721d0a759SHiroshi Doyu if (!__in_atomic_pool(start, size)) 538c7909509SMarek Szyprowski return 0; 539c7909509SMarek Szyprowski 54036d0fd21SLaura Abbott gen_pool_free(atomic_pool, (unsigned long)start, size); 541e9da6e99SMarek Szyprowski 542c7909509SMarek Szyprowski return 1; 543c7909509SMarek Szyprowski } 544c7909509SMarek Szyprowski 545c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size, 5469848e48fSMarek Szyprowski pgprot_t prot, struct page **ret_page, 547f1270896SGregory CLEMENT const void *caller, bool want_vaddr, 548712c604dSLucas Stach int coherent_flag, gfp_t gfp) 549c7909509SMarek Szyprowski { 550c7909509SMarek Szyprowski unsigned long order = get_order(size); 551c7909509SMarek Szyprowski size_t count = size >> PAGE_SHIFT; 552c7909509SMarek Szyprowski struct page *page; 5536e8266e3SCarlo Caione void *ptr = NULL; 554c7909509SMarek Szyprowski 555d834c5abSMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN); 556c7909509SMarek Szyprowski if (!page) 557c7909509SMarek Szyprowski return NULL; 558c7909509SMarek Szyprowski 559f1270896SGregory CLEMENT __dma_clear_buffer(page, size, coherent_flag); 560c7909509SMarek Szyprowski 5616e8266e3SCarlo Caione if (!want_vaddr) 5626e8266e3SCarlo Caione goto out; 5636e8266e3SCarlo Caione 5649848e48fSMarek Szyprowski if (PageHighMem(page)) { 56578406ff5SChristoph Hellwig ptr = dma_common_contiguous_remap(page, size, prot, caller); 5669848e48fSMarek Szyprowski if (!ptr) { 5679848e48fSMarek Szyprowski dma_release_from_contiguous(dev, page, count); 5689848e48fSMarek Szyprowski return NULL; 5699848e48fSMarek Szyprowski } 5709848e48fSMarek Szyprowski } else { 5719848e48fSMarek Szyprowski __dma_remap(page, size, prot); 5729848e48fSMarek Szyprowski ptr = page_address(page); 5739848e48fSMarek Szyprowski } 5746e8266e3SCarlo Caione 5756e8266e3SCarlo Caione out: 576c7909509SMarek Szyprowski *ret_page = page; 5779848e48fSMarek Szyprowski return ptr; 578c7909509SMarek Szyprowski } 579c7909509SMarek Szyprowski 580c7909509SMarek Szyprowski static void __free_from_contiguous(struct device *dev, struct page *page, 5816e8266e3SCarlo Caione void *cpu_addr, size_t size, bool want_vaddr) 582c7909509SMarek 
Szyprowski { 5836e8266e3SCarlo Caione if (want_vaddr) { 5849848e48fSMarek Szyprowski if (PageHighMem(page)) 58578406ff5SChristoph Hellwig dma_common_free_remap(cpu_addr, size); 5869848e48fSMarek Szyprowski else 58771b55663SRussell King __dma_remap(page, size, PAGE_KERNEL); 5886e8266e3SCarlo Caione } 589c7909509SMarek Szyprowski dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); 590c7909509SMarek Szyprowski } 591c7909509SMarek Szyprowski 59200085f1eSKrzysztof Kozlowski static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot) 593f99d6034SMarek Szyprowski { 59400085f1eSKrzysztof Kozlowski prot = (attrs & DMA_ATTR_WRITE_COMBINE) ? 595f99d6034SMarek Szyprowski pgprot_writecombine(prot) : 596f99d6034SMarek Szyprowski pgprot_dmacoherent(prot); 597f99d6034SMarek Szyprowski return prot; 598f99d6034SMarek Szyprowski } 599f99d6034SMarek Szyprowski 600c7909509SMarek Szyprowski static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, 601c7909509SMarek Szyprowski struct page **ret_page) 602ab6494f0SCatalin Marinas { 60304da5694SRussell King struct page *page; 604f1270896SGregory CLEMENT /* __alloc_simple_buffer is only called when the device is coherent */ 605f1270896SGregory CLEMENT page = __dma_alloc_buffer(dev, size, gfp, COHERENT); 606c7909509SMarek Szyprowski if (!page) 607c7909509SMarek Szyprowski return NULL; 608c7909509SMarek Szyprowski 609c7909509SMarek Szyprowski *ret_page = page; 610c7909509SMarek Szyprowski return page_address(page); 611c7909509SMarek Szyprowski } 612c7909509SMarek Szyprowski 613b4268676SRabin Vincent static void *simple_allocator_alloc(struct arm_dma_alloc_args *args, 614b4268676SRabin Vincent struct page **ret_page) 615b4268676SRabin Vincent { 616b4268676SRabin Vincent return __alloc_simple_buffer(args->dev, args->size, args->gfp, 617b4268676SRabin Vincent ret_page); 618b4268676SRabin Vincent } 619c7909509SMarek Szyprowski 620b4268676SRabin Vincent static void simple_allocator_free(struct arm_dma_free_args *args) 621b4268676SRabin Vincent { 622b4268676SRabin Vincent __dma_free_buffer(args->page, args->size); 623b4268676SRabin Vincent } 624b4268676SRabin Vincent 625b4268676SRabin Vincent static struct arm_dma_allocator simple_allocator = { 626b4268676SRabin Vincent .alloc = simple_allocator_alloc, 627b4268676SRabin Vincent .free = simple_allocator_free, 628b4268676SRabin Vincent }; 629b4268676SRabin Vincent 630b4268676SRabin Vincent static void *cma_allocator_alloc(struct arm_dma_alloc_args *args, 631b4268676SRabin Vincent struct page **ret_page) 632b4268676SRabin Vincent { 633b4268676SRabin Vincent return __alloc_from_contiguous(args->dev, args->size, args->prot, 634b4268676SRabin Vincent ret_page, args->caller, 635712c604dSLucas Stach args->want_vaddr, args->coherent_flag, 636712c604dSLucas Stach args->gfp); 637b4268676SRabin Vincent } 638b4268676SRabin Vincent 639b4268676SRabin Vincent static void cma_allocator_free(struct arm_dma_free_args *args) 640b4268676SRabin Vincent { 641b4268676SRabin Vincent __free_from_contiguous(args->dev, args->page, args->cpu_addr, 642b4268676SRabin Vincent args->size, args->want_vaddr); 643b4268676SRabin Vincent } 644b4268676SRabin Vincent 645b4268676SRabin Vincent static struct arm_dma_allocator cma_allocator = { 646b4268676SRabin Vincent .alloc = cma_allocator_alloc, 647b4268676SRabin Vincent .free = cma_allocator_free, 648b4268676SRabin Vincent }; 649b4268676SRabin Vincent 650b4268676SRabin Vincent static void *pool_allocator_alloc(struct arm_dma_alloc_args *args, 651b4268676SRabin 
Vincent struct page **ret_page) 652b4268676SRabin Vincent { 653b4268676SRabin Vincent return __alloc_from_pool(args->size, ret_page); 654b4268676SRabin Vincent } 655b4268676SRabin Vincent 656b4268676SRabin Vincent static void pool_allocator_free(struct arm_dma_free_args *args) 657b4268676SRabin Vincent { 658b4268676SRabin Vincent __free_from_pool(args->cpu_addr, args->size); 659b4268676SRabin Vincent } 660b4268676SRabin Vincent 661b4268676SRabin Vincent static struct arm_dma_allocator pool_allocator = { 662b4268676SRabin Vincent .alloc = pool_allocator_alloc, 663b4268676SRabin Vincent .free = pool_allocator_free, 664b4268676SRabin Vincent }; 665b4268676SRabin Vincent 666b4268676SRabin Vincent static void *remap_allocator_alloc(struct arm_dma_alloc_args *args, 667b4268676SRabin Vincent struct page **ret_page) 668b4268676SRabin Vincent { 669b4268676SRabin Vincent return __alloc_remap_buffer(args->dev, args->size, args->gfp, 670b4268676SRabin Vincent args->prot, ret_page, args->caller, 671b4268676SRabin Vincent args->want_vaddr); 672b4268676SRabin Vincent } 673b4268676SRabin Vincent 674b4268676SRabin Vincent static void remap_allocator_free(struct arm_dma_free_args *args) 675b4268676SRabin Vincent { 676b4268676SRabin Vincent if (args->want_vaddr) 67778406ff5SChristoph Hellwig dma_common_free_remap(args->cpu_addr, args->size); 678b4268676SRabin Vincent 679b4268676SRabin Vincent __dma_free_buffer(args->page, args->size); 680b4268676SRabin Vincent } 681b4268676SRabin Vincent 682b4268676SRabin Vincent static struct arm_dma_allocator remap_allocator = { 683b4268676SRabin Vincent .alloc = remap_allocator_alloc, 684b4268676SRabin Vincent .free = remap_allocator_free, 685b4268676SRabin Vincent }; 686c7909509SMarek Szyprowski 687c7909509SMarek Szyprowski static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 6886e8266e3SCarlo Caione gfp_t gfp, pgprot_t prot, bool is_coherent, 68900085f1eSKrzysztof Kozlowski unsigned long attrs, const void *caller) 690c7909509SMarek Szyprowski { 691c7909509SMarek Szyprowski u64 mask = get_coherent_dma_mask(dev); 6923dd7ea92SJingoo Han struct page *page = NULL; 69331ebf944SRussell King void *addr; 694b4268676SRabin Vincent bool allowblock, cma; 69519e6e5e5SRabin Vincent struct arm_dma_buffer *buf; 696b4268676SRabin Vincent struct arm_dma_alloc_args args = { 697b4268676SRabin Vincent .dev = dev, 698b4268676SRabin Vincent .size = PAGE_ALIGN(size), 699b4268676SRabin Vincent .gfp = gfp, 700b4268676SRabin Vincent .prot = prot, 701b4268676SRabin Vincent .caller = caller, 70200085f1eSKrzysztof Kozlowski .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), 703f1270896SGregory CLEMENT .coherent_flag = is_coherent ? 
COHERENT : NORMAL, 704b4268676SRabin Vincent }; 705ab6494f0SCatalin Marinas 706c7909509SMarek Szyprowski #ifdef CONFIG_DMA_API_DEBUG 707c7909509SMarek Szyprowski u64 limit = (mask + 1) & ~mask; 708c7909509SMarek Szyprowski if (limit && size >= limit) { 709c7909509SMarek Szyprowski dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", 710c7909509SMarek Szyprowski size, mask); 711c7909509SMarek Szyprowski return NULL; 712c7909509SMarek Szyprowski } 713c7909509SMarek Szyprowski #endif 714c7909509SMarek Szyprowski 715c7909509SMarek Szyprowski if (!mask) 716c7909509SMarek Szyprowski return NULL; 717c7909509SMarek Szyprowski 7189c18fcf7SAlexandre Courbot buf = kzalloc(sizeof(*buf), 7199c18fcf7SAlexandre Courbot gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)); 72019e6e5e5SRabin Vincent if (!buf) 72119e6e5e5SRabin Vincent return NULL; 72219e6e5e5SRabin Vincent 723c7909509SMarek Szyprowski if (mask < 0xffffffffULL) 724c7909509SMarek Szyprowski gfp |= GFP_DMA; 725c7909509SMarek Szyprowski 726ea2e7057SSumit Bhattacharya /* 727ea2e7057SSumit Bhattacharya * Following is a work-around (a.k.a. hack) to prevent pages 728ea2e7057SSumit Bhattacharya * with __GFP_COMP being passed to split_page() which cannot 729ea2e7057SSumit Bhattacharya * handle them. The real problem is that this flag probably 730ea2e7057SSumit Bhattacharya * should be 0 on ARM as it is not supported on this 731ea2e7057SSumit Bhattacharya * platform; see CONFIG_HUGETLBFS. 732ea2e7057SSumit Bhattacharya */ 733ea2e7057SSumit Bhattacharya gfp &= ~(__GFP_COMP); 734b4268676SRabin Vincent args.gfp = gfp; 735ea2e7057SSumit Bhattacharya 73672fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 737b4268676SRabin Vincent allowblock = gfpflags_allow_blocking(gfp); 738b4268676SRabin Vincent cma = allowblock ? dev_get_cma_area(dev) : false; 73904da5694SRussell King 740b4268676SRabin Vincent if (cma) 741b4268676SRabin Vincent buf->allocator = &cma_allocator; 7421655cf88SVladimir Murzin else if (is_coherent) 743b4268676SRabin Vincent buf->allocator = &simple_allocator; 744b4268676SRabin Vincent else if (allowblock) 745b4268676SRabin Vincent buf->allocator = &remap_allocator; 74631ebf944SRussell King else 747b4268676SRabin Vincent buf->allocator = &pool_allocator; 748b4268676SRabin Vincent 749b4268676SRabin Vincent addr = buf->allocator->alloc(&args, &page); 75031ebf944SRussell King 75119e6e5e5SRabin Vincent if (page) { 75219e6e5e5SRabin Vincent unsigned long flags; 75319e6e5e5SRabin Vincent 7549eedd963SRussell King *handle = pfn_to_dma(dev, page_to_pfn(page)); 755b4268676SRabin Vincent buf->virt = args.want_vaddr ? addr : page; 75619e6e5e5SRabin Vincent 75719e6e5e5SRabin Vincent spin_lock_irqsave(&arm_dma_bufs_lock, flags); 75819e6e5e5SRabin Vincent list_add(&buf->list, &arm_dma_bufs); 75919e6e5e5SRabin Vincent spin_unlock_irqrestore(&arm_dma_bufs_lock, flags); 76019e6e5e5SRabin Vincent } else { 76119e6e5e5SRabin Vincent kfree(buf); 76219e6e5e5SRabin Vincent } 76331ebf944SRussell King 764b4268676SRabin Vincent return args.want_vaddr ? addr : page; 765ab6494f0SCatalin Marinas } 766695ae0afSRussell King 7670ddbccd1SRussell King /* 7680ddbccd1SRussell King * Allocate DMA-coherent memory space and return both the kernel remapped 7690ddbccd1SRussell King * virtual and bus address for that space. 
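 *
 * A minimal sketch of how a driver typically reaches this through the
 * generic API (the size and flags below are only illustrative):
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... CPU uses 'cpu', the device is programmed with 'dma' ...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);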
7700ddbccd1SRussell King */ 771f99d6034SMarek Szyprowski void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 77200085f1eSKrzysztof Kozlowski gfp_t gfp, unsigned long attrs) 7730ddbccd1SRussell King { 7740ea1ec71SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 7750ddbccd1SRussell King 776dd37e940SRob Herring return __dma_alloc(dev, size, handle, gfp, prot, false, 7776e8266e3SCarlo Caione attrs, __builtin_return_address(0)); 778dd37e940SRob Herring } 779dd37e940SRob Herring 780dd37e940SRob Herring static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 78100085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 782dd37e940SRob Herring { 78321caf3a7SLorenzo Nava return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, 7846e8266e3SCarlo Caione attrs, __builtin_return_address(0)); 7850ddbccd1SRussell King } 7860ddbccd1SRussell King 78755af8a91SMike Looijmans static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 788f99d6034SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 78900085f1eSKrzysztof Kozlowski unsigned long attrs) 7900ddbccd1SRussell King { 791c2a3831dSNathan Jones int ret = -ENXIO; 792a70c3ee3SFabio Estevam unsigned long nr_vma_pages = vma_pages(vma); 79350262a4bSMarek Szyprowski unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 794c7909509SMarek Szyprowski unsigned long pfn = dma_to_pfn(dev, dma_addr); 79550262a4bSMarek Szyprowski unsigned long off = vma->vm_pgoff; 79650262a4bSMarek Szyprowski 79743fc509cSVladimir Murzin if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 79847142f07SMarek Szyprowski return ret; 79947142f07SMarek Szyprowski 80050262a4bSMarek Szyprowski if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { 8010ddbccd1SRussell King ret = remap_pfn_range(vma, vma->vm_start, 80250262a4bSMarek Szyprowski pfn + off, 803c7909509SMarek Szyprowski vma->vm_end - vma->vm_start, 8040ddbccd1SRussell King vma->vm_page_prot); 80550262a4bSMarek Szyprowski } 8060ddbccd1SRussell King 8070ddbccd1SRussell King return ret; 8080ddbccd1SRussell King } 8090ddbccd1SRussell King 8100ddbccd1SRussell King /* 81155af8a91SMike Looijmans * Create userspace mapping for the DMA-coherent memory. 81255af8a91SMike Looijmans */ 81355af8a91SMike Looijmans static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, 81455af8a91SMike Looijmans void *cpu_addr, dma_addr_t dma_addr, size_t size, 81500085f1eSKrzysztof Kozlowski unsigned long attrs) 81655af8a91SMike Looijmans { 81755af8a91SMike Looijmans return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 81855af8a91SMike Looijmans } 81955af8a91SMike Looijmans 82055af8a91SMike Looijmans int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 82155af8a91SMike Looijmans void *cpu_addr, dma_addr_t dma_addr, size_t size, 82200085f1eSKrzysztof Kozlowski unsigned long attrs) 82355af8a91SMike Looijmans { 82455af8a91SMike Looijmans vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 82555af8a91SMike Looijmans return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 82655af8a91SMike Looijmans } 82755af8a91SMike Looijmans 82855af8a91SMike Looijmans /* 829c7909509SMarek Szyprowski * Free a buffer as defined by the above mapping. 
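 * The buffer is looked up in the arm_dma_bufs list via
 * arm_dma_buffer_find() and handed back to whichever allocator
 * (simple, CMA, pool or remap) originally created it.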
8300ddbccd1SRussell King */ 831dd37e940SRob Herring static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 83200085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs, 833dd37e940SRob Herring bool is_coherent) 8340ddbccd1SRussell King { 835c7909509SMarek Szyprowski struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); 83619e6e5e5SRabin Vincent struct arm_dma_buffer *buf; 837b4268676SRabin Vincent struct arm_dma_free_args args = { 838b4268676SRabin Vincent .dev = dev, 839b4268676SRabin Vincent .size = PAGE_ALIGN(size), 840b4268676SRabin Vincent .cpu_addr = cpu_addr, 841b4268676SRabin Vincent .page = page, 84200085f1eSKrzysztof Kozlowski .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), 843b4268676SRabin Vincent }; 84419e6e5e5SRabin Vincent 84519e6e5e5SRabin Vincent buf = arm_dma_buffer_find(cpu_addr); 84619e6e5e5SRabin Vincent if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr)) 84719e6e5e5SRabin Vincent return; 8480ddbccd1SRussell King 849b4268676SRabin Vincent buf->allocator->free(&args); 85019e6e5e5SRabin Vincent kfree(buf); 8510ddbccd1SRussell King } 852afd1a321SRussell King 853dd37e940SRob Herring void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 85400085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs) 855dd37e940SRob Herring { 856dd37e940SRob Herring __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); 857dd37e940SRob Herring } 858dd37e940SRob Herring 859dd37e940SRob Herring static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 86000085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs) 861dd37e940SRob Herring { 862dd37e940SRob Herring __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); 863dd37e940SRob Herring } 864dd37e940SRob Herring 865dc2832e1SMarek Szyprowski int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, 866dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t handle, size_t size, 86700085f1eSKrzysztof Kozlowski unsigned long attrs) 868dc2832e1SMarek Szyprowski { 869916a008bSRussell King unsigned long pfn = dma_to_pfn(dev, handle); 870916a008bSRussell King struct page *page; 871dc2832e1SMarek Szyprowski int ret; 872dc2832e1SMarek Szyprowski 873916a008bSRussell King /* If the PFN is not valid, we do not have a struct page */ 874916a008bSRussell King if (!pfn_valid(pfn)) 875916a008bSRussell King return -ENXIO; 876916a008bSRussell King 877916a008bSRussell King page = pfn_to_page(pfn); 878916a008bSRussell King 879dc2832e1SMarek Szyprowski ret = sg_alloc_table(sgt, 1, GFP_KERNEL); 880dc2832e1SMarek Szyprowski if (unlikely(ret)) 881dc2832e1SMarek Szyprowski return ret; 882dc2832e1SMarek Szyprowski 883dc2832e1SMarek Szyprowski sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); 884dc2832e1SMarek Szyprowski return 0; 885dc2832e1SMarek Szyprowski } 886dc2832e1SMarek Szyprowski 88765af191aSRussell King static void dma_cache_maint_page(struct page *page, unsigned long offset, 888a9c9147eSRussell King size_t size, enum dma_data_direction dir, 889a9c9147eSRussell King void (*op)(const void *, size_t, int)) 89065af191aSRussell King { 89115653371SRussell King unsigned long pfn; 89215653371SRussell King size_t left = size; 89315653371SRussell King 89415653371SRussell King pfn = page_to_pfn(page) + offset / PAGE_SIZE; 89515653371SRussell King offset %= PAGE_SIZE; 89615653371SRussell King 89765af191aSRussell King /* 89865af191aSRussell King * A single sg entry may refer to multiple physically contiguous 89965af191aSRussell King * pages. 
But we still need to process highmem pages individually. 90065af191aSRussell King * If highmem is not configured then the bulk of this loop gets 90165af191aSRussell King * optimized out. 90265af191aSRussell King */ 90365af191aSRussell King do { 90465af191aSRussell King size_t len = left; 90593f1d629SRussell King void *vaddr; 90693f1d629SRussell King 90715653371SRussell King page = pfn_to_page(pfn); 90815653371SRussell King 90993f1d629SRussell King if (PageHighMem(page)) { 91015653371SRussell King if (len + offset > PAGE_SIZE) 91165af191aSRussell King len = PAGE_SIZE - offset; 912dd0f67f4SJoonsoo Kim 913dd0f67f4SJoonsoo Kim if (cache_is_vipt_nonaliasing()) { 91439af22a7SNicolas Pitre vaddr = kmap_atomic(page); 9157e5a69e8SNicolas Pitre op(vaddr + offset, len, dir); 91639af22a7SNicolas Pitre kunmap_atomic(vaddr); 917dd0f67f4SJoonsoo Kim } else { 918dd0f67f4SJoonsoo Kim vaddr = kmap_high_get(page); 919dd0f67f4SJoonsoo Kim if (vaddr) { 920dd0f67f4SJoonsoo Kim op(vaddr + offset, len, dir); 921dd0f67f4SJoonsoo Kim kunmap_high(page); 922dd0f67f4SJoonsoo Kim } 92393f1d629SRussell King } 92493f1d629SRussell King } else { 92593f1d629SRussell King vaddr = page_address(page) + offset; 926a9c9147eSRussell King op(vaddr, len, dir); 92793f1d629SRussell King } 92865af191aSRussell King offset = 0; 92915653371SRussell King pfn++; 93065af191aSRussell King left -= len; 93165af191aSRussell King } while (left); 93265af191aSRussell King } 93365af191aSRussell King 93451fde349SMarek Szyprowski /* 93551fde349SMarek Szyprowski * Make an area consistent for devices. 93651fde349SMarek Szyprowski * Note: Drivers should NOT use this function directly, as it will break 93751fde349SMarek Szyprowski * platforms with CONFIG_DMABOUNCE. 93851fde349SMarek Szyprowski * Use the driver DMA support - see dma-mapping.h (dma_sync_*) 93951fde349SMarek Szyprowski */ 94051fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, 94165af191aSRussell King size_t size, enum dma_data_direction dir) 94265af191aSRussell King { 9432161c248SSantosh Shilimkar phys_addr_t paddr; 94443377453SNicolas Pitre 945a9c9147eSRussell King dma_cache_maint_page(page, off, size, dir, dmac_map_area); 94643377453SNicolas Pitre 94765af191aSRussell King paddr = page_to_phys(page) + off; 9482ffe2da3SRussell King if (dir == DMA_FROM_DEVICE) { 9492ffe2da3SRussell King outer_inv_range(paddr, paddr + size); 9502ffe2da3SRussell King } else { 9512ffe2da3SRussell King outer_clean_range(paddr, paddr + size); 9522ffe2da3SRussell King } 9532ffe2da3SRussell King /* FIXME: non-speculating: flush on bidirectional mappings? */ 95443377453SNicolas Pitre } 9554ea0d737SRussell King 95651fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, 9574ea0d737SRussell King size_t size, enum dma_data_direction dir) 9584ea0d737SRussell King { 9592161c248SSantosh Shilimkar phys_addr_t paddr = page_to_phys(page) + off; 9602ffe2da3SRussell King 9612ffe2da3SRussell King /* FIXME: non-speculating: not required */ 962deace4a6SRussell King /* in any case, don't bother invalidating if DMA to device */ 963deace4a6SRussell King if (dir != DMA_TO_DEVICE) { 9642ffe2da3SRussell King outer_inv_range(paddr, paddr + size); 9652ffe2da3SRussell King 966a9c9147eSRussell King dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); 967deace4a6SRussell King } 968c0177800SCatalin Marinas 969c0177800SCatalin Marinas /* 970b2a234edSMing Lei * Mark the D-cache clean for these pages to avoid extra flushing. 
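 * (Code that later maps these pages into userspace tests PG_dcache_clean
 * and can then skip a redundant D-cache flush.)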
971c0177800SCatalin Marinas */ 972b2a234edSMing Lei if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { 973b2a234edSMing Lei unsigned long pfn; 974b2a234edSMing Lei size_t left = size; 975b2a234edSMing Lei 976b2a234edSMing Lei pfn = page_to_pfn(page) + off / PAGE_SIZE; 977b2a234edSMing Lei off %= PAGE_SIZE; 978b2a234edSMing Lei if (off) { 979b2a234edSMing Lei pfn++; 980b2a234edSMing Lei left -= PAGE_SIZE - off; 981b2a234edSMing Lei } 982b2a234edSMing Lei while (left >= PAGE_SIZE) { 983b2a234edSMing Lei page = pfn_to_page(pfn++); 984c0177800SCatalin Marinas set_bit(PG_dcache_clean, &page->flags); 985b2a234edSMing Lei left -= PAGE_SIZE; 986b2a234edSMing Lei } 987b2a234edSMing Lei } 9884ea0d737SRussell King } 98943377453SNicolas Pitre 990afd1a321SRussell King /** 9912a550e73SMarek Szyprowski * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA 992afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 993afd1a321SRussell King * @sg: list of buffers 994afd1a321SRussell King * @nents: number of buffers to map 995afd1a321SRussell King * @dir: DMA transfer direction 996afd1a321SRussell King * 997afd1a321SRussell King * Map a set of buffers described by scatterlist in streaming mode for DMA. 998afd1a321SRussell King * This is the scatter-gather version of the dma_map_single interface. 999afd1a321SRussell King * Here the scatter gather list elements are each tagged with the 1000afd1a321SRussell King * appropriate dma address and length. They are obtained via 1001afd1a321SRussell King * sg_dma_{address,length}. 1002afd1a321SRussell King * 1003afd1a321SRussell King * Device ownership issues as mentioned for dma_map_single are the same 1004afd1a321SRussell King * here. 1005afd1a321SRussell King */ 10062dc6a016SMarek Szyprowski int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 100700085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs) 1008afd1a321SRussell King { 10095299709dSBart Van Assche const struct dma_map_ops *ops = get_dma_ops(dev); 1010afd1a321SRussell King struct scatterlist *s; 101101135d92SRussell King int i, j; 1012afd1a321SRussell King 1013afd1a321SRussell King for_each_sg(sg, s, nents, i) { 10144ce63fcdSMarek Szyprowski #ifdef CONFIG_NEED_SG_DMA_LENGTH 10154ce63fcdSMarek Szyprowski s->dma_length = s->length; 10164ce63fcdSMarek Szyprowski #endif 10172a550e73SMarek Szyprowski s->dma_address = ops->map_page(dev, sg_page(s), s->offset, 10182a550e73SMarek Szyprowski s->length, dir, attrs); 101901135d92SRussell King if (dma_mapping_error(dev, s->dma_address)) 102001135d92SRussell King goto bad_mapping; 1021afd1a321SRussell King } 1022afd1a321SRussell King return nents; 102301135d92SRussell King 102401135d92SRussell King bad_mapping: 102501135d92SRussell King for_each_sg(sg, s, i, j) 10262a550e73SMarek Szyprowski ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 102701135d92SRussell King return 0; 1028afd1a321SRussell King } 1029afd1a321SRussell King 1030afd1a321SRussell King /** 10312a550e73SMarek Szyprowski * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 1032afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1033afd1a321SRussell King * @sg: list of buffers 10340adfca6fSLinus Walleij * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 1035afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 1036afd1a321SRussell King * 1037afd1a321SRussell King * Unmap a set of 
streaming mode DMA translations. Again, CPU access 1038afd1a321SRussell King * rules concerning calls here are the same as for dma_unmap_single(). 1039afd1a321SRussell King */ 10402dc6a016SMarek Szyprowski void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 104100085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs) 1042afd1a321SRussell King { 10435299709dSBart Van Assche const struct dma_map_ops *ops = get_dma_ops(dev); 104401135d92SRussell King struct scatterlist *s; 104501135d92SRussell King 104601135d92SRussell King int i; 104724056f52SRussell King 104801135d92SRussell King for_each_sg(sg, s, nents, i) 10492a550e73SMarek Szyprowski ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 1050afd1a321SRussell King } 1051afd1a321SRussell King 1052afd1a321SRussell King /** 10532a550e73SMarek Szyprowski * arm_dma_sync_sg_for_cpu 1054afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1055afd1a321SRussell King * @sg: list of buffers 1056afd1a321SRussell King * @nents: number of buffers to map (returned from dma_map_sg) 1057afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 1058afd1a321SRussell King */ 10592dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 1060afd1a321SRussell King int nents, enum dma_data_direction dir) 1061afd1a321SRussell King { 10625299709dSBart Van Assche const struct dma_map_ops *ops = get_dma_ops(dev); 1063afd1a321SRussell King struct scatterlist *s; 1064afd1a321SRussell King int i; 1065afd1a321SRussell King 10662a550e73SMarek Szyprowski for_each_sg(sg, s, nents, i) 10672a550e73SMarek Szyprowski ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, 10682a550e73SMarek Szyprowski dir); 1069afd1a321SRussell King } 107024056f52SRussell King 1071afd1a321SRussell King /** 10722a550e73SMarek Szyprowski * arm_dma_sync_sg_for_device 1073afd1a321SRussell King * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 1074afd1a321SRussell King * @sg: list of buffers 1075afd1a321SRussell King * @nents: number of buffers to map (returned from dma_map_sg) 1076afd1a321SRussell King * @dir: DMA transfer direction (same as was passed to dma_map_sg) 1077afd1a321SRussell King */ 10782dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 1079afd1a321SRussell King int nents, enum dma_data_direction dir) 1080afd1a321SRussell King { 10815299709dSBart Van Assche const struct dma_map_ops *ops = get_dma_ops(dev); 1082afd1a321SRussell King struct scatterlist *s; 1083afd1a321SRussell King int i; 1084afd1a321SRussell King 10852a550e73SMarek Szyprowski for_each_sg(sg, s, nents, i) 10862a550e73SMarek Szyprowski ops->sync_single_for_device(dev, sg_dma_address(s), s->length, 10872a550e73SMarek Szyprowski dir); 1088afd1a321SRussell King } 108924056f52SRussell King 1090022ae537SRussell King /* 1091022ae537SRussell King * Return whether the given device DMA address mask can be supported 1092022ae537SRussell King * properly. For example, if your device can only drive the low 24-bits 1093022ae537SRussell King * during bus mastering, then you would pass 0x00ffffff as the mask 1094022ae537SRussell King * to this function. 
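 * This routine is wired up as the .dma_supported method of arm_dma_ops
 * and arm_coherent_dma_ops, so it is what ultimately answers
 * dma_set_mask() and dma_set_coherent_mask() for devices using those ops.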
1095022ae537SRussell King */ 1096418a7a7eSChristoph Hellwig int arm_dma_supported(struct device *dev, u64 mask) 1097022ae537SRussell King { 10989f28cde0SRussell King return __dma_supported(dev, mask, false); 1099022ae537SRussell King } 1100022ae537SRussell King 11011874619aSThierry Reding static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) 11021874619aSThierry Reding { 1103ad3c7b18SChristoph Hellwig /* 1104ad3c7b18SChristoph Hellwig * When CONFIG_ARM_LPAE is set, physical address can extend above 1105ad3c7b18SChristoph Hellwig * 32-bits, which then can't be addressed by devices that only support 1106ad3c7b18SChristoph Hellwig * 32-bit DMA. 1107ad3c7b18SChristoph Hellwig * Use the generic dma-direct / swiotlb ops code in that case, as that 1108ad3c7b18SChristoph Hellwig * handles bounce buffering for us. 1109ad3c7b18SChristoph Hellwig */ 1110ad3c7b18SChristoph Hellwig if (IS_ENABLED(CONFIG_ARM_LPAE)) 1111ad3c7b18SChristoph Hellwig return NULL; 11121874619aSThierry Reding return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; 11131874619aSThierry Reding } 11141874619aSThierry Reding 11154ce63fcdSMarek Szyprowski #ifdef CONFIG_ARM_DMA_USE_IOMMU 11164ce63fcdSMarek Szyprowski 11177d2822dfSSricharan R static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) 11187d2822dfSSricharan R { 11197d2822dfSSricharan R int prot = 0; 11207d2822dfSSricharan R 11217d2822dfSSricharan R if (attrs & DMA_ATTR_PRIVILEGED) 11227d2822dfSSricharan R prot |= IOMMU_PRIV; 11237d2822dfSSricharan R 11247d2822dfSSricharan R switch (dir) { 11257d2822dfSSricharan R case DMA_BIDIRECTIONAL: 11267d2822dfSSricharan R return prot | IOMMU_READ | IOMMU_WRITE; 11277d2822dfSSricharan R case DMA_TO_DEVICE: 11287d2822dfSSricharan R return prot | IOMMU_READ; 11297d2822dfSSricharan R case DMA_FROM_DEVICE: 11307d2822dfSSricharan R return prot | IOMMU_WRITE; 11317d2822dfSSricharan R default: 11327d2822dfSSricharan R return prot; 11337d2822dfSSricharan R } 11347d2822dfSSricharan R } 11357d2822dfSSricharan R 11364ce63fcdSMarek Szyprowski /* IOMMU */ 11374ce63fcdSMarek Szyprowski 11384d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping); 11394d852ef8SAndreas Herrmann 11404ce63fcdSMarek Szyprowski static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, 11414ce63fcdSMarek Szyprowski size_t size) 11424ce63fcdSMarek Szyprowski { 11434ce63fcdSMarek Szyprowski unsigned int order = get_order(size); 11444ce63fcdSMarek Szyprowski unsigned int align = 0; 11454ce63fcdSMarek Szyprowski unsigned int count, start; 1146006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 11474ce63fcdSMarek Szyprowski unsigned long flags; 11484d852ef8SAndreas Herrmann dma_addr_t iova; 11494d852ef8SAndreas Herrmann int i; 11504ce63fcdSMarek Szyprowski 115160460abfSSeung-Woo Kim if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT) 115260460abfSSeung-Woo Kim order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT; 115360460abfSSeung-Woo Kim 115468efd7d2SMarek Szyprowski count = PAGE_ALIGN(size) >> PAGE_SHIFT; 115568efd7d2SMarek Szyprowski align = (1 << order) - 1; 11564ce63fcdSMarek Szyprowski 11574ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 11584d852ef8SAndreas Herrmann for (i = 0; i < mapping->nr_bitmaps; i++) { 11594d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11604d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 11614d852ef8SAndreas Herrmann 11624d852ef8SAndreas Herrmann if (start > mapping->bits) 
11634d852ef8SAndreas Herrmann continue; 11644d852ef8SAndreas Herrmann 11654d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 11664d852ef8SAndreas Herrmann break; 11674d852ef8SAndreas Herrmann } 11684d852ef8SAndreas Herrmann 11694d852ef8SAndreas Herrmann /* 11704d852ef8SAndreas Herrmann * No unused range found. Try to extend the existing mapping 11714d852ef8SAndreas Herrmann * and perform a second attempt to reserve an IO virtual 11724d852ef8SAndreas Herrmann * address range of size bytes. 11734d852ef8SAndreas Herrmann */ 11744d852ef8SAndreas Herrmann if (i == mapping->nr_bitmaps) { 11754d852ef8SAndreas Herrmann if (extend_iommu_mapping(mapping)) { 11764d852ef8SAndreas Herrmann spin_unlock_irqrestore(&mapping->lock, flags); 117772fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 11784d852ef8SAndreas Herrmann } 11794d852ef8SAndreas Herrmann 11804d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11814d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 11824d852ef8SAndreas Herrmann 11834ce63fcdSMarek Szyprowski if (start > mapping->bits) { 11844ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 118572fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 11864ce63fcdSMarek Szyprowski } 11874ce63fcdSMarek Szyprowski 11884d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 11894d852ef8SAndreas Herrmann } 11904ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 11914ce63fcdSMarek Szyprowski 1192006f841dSRitesh Harjani iova = mapping->base + (mapping_size * i); 119368efd7d2SMarek Szyprowski iova += start << PAGE_SHIFT; 11944d852ef8SAndreas Herrmann 11954d852ef8SAndreas Herrmann return iova; 11964ce63fcdSMarek Szyprowski } 11974ce63fcdSMarek Szyprowski 11984ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping, 11994ce63fcdSMarek Szyprowski dma_addr_t addr, size_t size) 12004ce63fcdSMarek Szyprowski { 12014d852ef8SAndreas Herrmann unsigned int start, count; 1202006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 12034ce63fcdSMarek Szyprowski unsigned long flags; 12044d852ef8SAndreas Herrmann dma_addr_t bitmap_base; 12054d852ef8SAndreas Herrmann u32 bitmap_index; 12064d852ef8SAndreas Herrmann 12074d852ef8SAndreas Herrmann if (!size) 12084d852ef8SAndreas Herrmann return; 12094d852ef8SAndreas Herrmann 1210006f841dSRitesh Harjani bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; 12114d852ef8SAndreas Herrmann BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); 12124d852ef8SAndreas Herrmann 1213006f841dSRitesh Harjani bitmap_base = mapping->base + mapping_size * bitmap_index; 12144d852ef8SAndreas Herrmann 121568efd7d2SMarek Szyprowski start = (addr - bitmap_base) >> PAGE_SHIFT; 12164d852ef8SAndreas Herrmann 1217006f841dSRitesh Harjani if (addr + size > bitmap_base + mapping_size) { 12184d852ef8SAndreas Herrmann /* 12194d852ef8SAndreas Herrmann * The address range to be freed reaches into the iova 12204d852ef8SAndreas Herrmann * range of the next bitmap. This should not happen as 12214d852ef8SAndreas Herrmann * we don't allow this in __alloc_iova (at the 12224d852ef8SAndreas Herrmann * moment). 
12234d852ef8SAndreas Herrmann */ 12244d852ef8SAndreas Herrmann BUG(); 12254d852ef8SAndreas Herrmann } else 122668efd7d2SMarek Szyprowski count = size >> PAGE_SHIFT; 12274ce63fcdSMarek Szyprowski 12284ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 12294d852ef8SAndreas Herrmann bitmap_clear(mapping->bitmaps[bitmap_index], start, count); 12304ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 12314ce63fcdSMarek Szyprowski } 12324ce63fcdSMarek Szyprowski 123333298ef6SDoug Anderson /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */ 123433298ef6SDoug Anderson static const int iommu_order_array[] = { 9, 8, 4, 0 }; 123533298ef6SDoug Anderson 1236549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, 123700085f1eSKrzysztof Kozlowski gfp_t gfp, unsigned long attrs, 1238f1270896SGregory CLEMENT int coherent_flag) 12394ce63fcdSMarek Szyprowski { 12404ce63fcdSMarek Szyprowski struct page **pages; 12414ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 12424ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 12434ce63fcdSMarek Szyprowski int i = 0; 124433298ef6SDoug Anderson int order_idx = 0; 12454ce63fcdSMarek Szyprowski 12464ce63fcdSMarek Szyprowski if (array_size <= PAGE_SIZE) 124723be7fdaSAlexandre Courbot pages = kzalloc(array_size, GFP_KERNEL); 12484ce63fcdSMarek Szyprowski else 12494ce63fcdSMarek Szyprowski pages = vzalloc(array_size); 12504ce63fcdSMarek Szyprowski if (!pages) 12514ce63fcdSMarek Szyprowski return NULL; 12524ce63fcdSMarek Szyprowski 125300085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) 1254549a17e4SMarek Szyprowski { 1255549a17e4SMarek Szyprowski unsigned long order = get_order(size); 1256549a17e4SMarek Szyprowski struct page *page; 1257549a17e4SMarek Szyprowski 1258d834c5abSMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order, 1259d834c5abSMarek Szyprowski gfp & __GFP_NOWARN); 1260549a17e4SMarek Szyprowski if (!page) 1261549a17e4SMarek Szyprowski goto error; 1262549a17e4SMarek Szyprowski 1263f1270896SGregory CLEMENT __dma_clear_buffer(page, size, coherent_flag); 1264549a17e4SMarek Szyprowski 1265549a17e4SMarek Szyprowski for (i = 0; i < count; i++) 1266549a17e4SMarek Szyprowski pages[i] = page + i; 1267549a17e4SMarek Szyprowski 1268549a17e4SMarek Szyprowski return pages; 1269549a17e4SMarek Szyprowski } 1270549a17e4SMarek Szyprowski 127114d3ae2eSDoug Anderson /* Go straight to 4K chunks if caller says it's OK. 
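 *
 * (Note, added for clarity: with 4 KiB pages the orders in
 * iommu_order_array[] above work out to 2^9 * 4 KiB = 2 MiB,
 * 2^8 * 4 KiB = 1 MiB, 2^4 * 4 KiB = 64 KiB and finally single 4 KiB
 * pages, which is the entry that DMA_ATTR_ALLOC_SINGLE_PAGES jumps to
 * directly.)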
*/ 127200085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) 127314d3ae2eSDoug Anderson order_idx = ARRAY_SIZE(iommu_order_array) - 1; 127414d3ae2eSDoug Anderson 1275f8669befSMarek Szyprowski /* 1276f8669befSMarek Szyprowski * IOMMU can map any pages, so highmem can also be used here 1277f8669befSMarek Szyprowski */ 1278f8669befSMarek Szyprowski gfp |= __GFP_NOWARN | __GFP_HIGHMEM; 1279f8669befSMarek Szyprowski 12804ce63fcdSMarek Szyprowski while (count) { 128149f28aa6STomasz Figa int j, order; 12824ce63fcdSMarek Szyprowski 128333298ef6SDoug Anderson order = iommu_order_array[order_idx]; 128433298ef6SDoug Anderson 128533298ef6SDoug Anderson /* Drop down when we get small */ 128633298ef6SDoug Anderson if (__fls(count) < order) { 128733298ef6SDoug Anderson order_idx++; 128833298ef6SDoug Anderson continue; 128949f28aa6STomasz Figa } 129049f28aa6STomasz Figa 129133298ef6SDoug Anderson if (order) { 129233298ef6SDoug Anderson /* See if it's easy to allocate a high-order chunk */ 129333298ef6SDoug Anderson pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); 129433298ef6SDoug Anderson 129533298ef6SDoug Anderson /* Go down a notch at first sign of pressure */ 129649f28aa6STomasz Figa if (!pages[i]) { 129733298ef6SDoug Anderson order_idx++; 129833298ef6SDoug Anderson continue; 129933298ef6SDoug Anderson } 130033298ef6SDoug Anderson } else { 130149f28aa6STomasz Figa pages[i] = alloc_pages(gfp, 0); 13024ce63fcdSMarek Szyprowski if (!pages[i]) 13034ce63fcdSMarek Szyprowski goto error; 130449f28aa6STomasz Figa } 13054ce63fcdSMarek Szyprowski 13065a796eebSHiroshi Doyu if (order) { 13074ce63fcdSMarek Szyprowski split_page(pages[i], order); 13084ce63fcdSMarek Szyprowski j = 1 << order; 13094ce63fcdSMarek Szyprowski while (--j) 13104ce63fcdSMarek Szyprowski pages[i + j] = pages[i] + j; 13115a796eebSHiroshi Doyu } 13124ce63fcdSMarek Szyprowski 1313f1270896SGregory CLEMENT __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag); 13144ce63fcdSMarek Szyprowski i += 1 << order; 13154ce63fcdSMarek Szyprowski count -= 1 << order; 13164ce63fcdSMarek Szyprowski } 13174ce63fcdSMarek Szyprowski 13184ce63fcdSMarek Szyprowski return pages; 13194ce63fcdSMarek Szyprowski error: 13209fa8af91SMarek Szyprowski while (i--) 13214ce63fcdSMarek Szyprowski if (pages[i]) 13224ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 13231d5cfdb0STetsuo Handa kvfree(pages); 13244ce63fcdSMarek Szyprowski return NULL; 13254ce63fcdSMarek Szyprowski } 13264ce63fcdSMarek Szyprowski 1327549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages, 132800085f1eSKrzysztof Kozlowski size_t size, unsigned long attrs) 13294ce63fcdSMarek Szyprowski { 13304ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 13314ce63fcdSMarek Szyprowski int i; 1332549a17e4SMarek Szyprowski 133300085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { 1334549a17e4SMarek Szyprowski dma_release_from_contiguous(dev, pages[0], count); 1335549a17e4SMarek Szyprowski } else { 13364ce63fcdSMarek Szyprowski for (i = 0; i < count; i++) 13374ce63fcdSMarek Szyprowski if (pages[i]) 13384ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 1339549a17e4SMarek Szyprowski } 1340549a17e4SMarek Szyprowski 13411d5cfdb0STetsuo Handa kvfree(pages); 13424ce63fcdSMarek Szyprowski return 0; 13434ce63fcdSMarek Szyprowski } 13444ce63fcdSMarek Szyprowski 13454ce63fcdSMarek Szyprowski /* 13464ce63fcdSMarek Szyprowski * Create a mapping in device IO address space for specified pages 13474ce63fcdSMarek Szyprowski */
13484ce63fcdSMarek Szyprowski static dma_addr_t 13497d2822dfSSricharan R __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, 13507d2822dfSSricharan R unsigned long attrs) 13514ce63fcdSMarek Szyprowski { 135289cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13534ce63fcdSMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 13544ce63fcdSMarek Szyprowski dma_addr_t dma_addr, iova; 135590cde558SAndre Przywara int i; 13564ce63fcdSMarek Szyprowski 13574ce63fcdSMarek Szyprowski dma_addr = __alloc_iova(mapping, size); 135872fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 13594ce63fcdSMarek Szyprowski return dma_addr; 13604ce63fcdSMarek Szyprowski 13614ce63fcdSMarek Szyprowski iova = dma_addr; 13624ce63fcdSMarek Szyprowski for (i = 0; i < count; ) { 136390cde558SAndre Przywara int ret; 136490cde558SAndre Przywara 13654ce63fcdSMarek Szyprowski unsigned int next_pfn = page_to_pfn(pages[i]) + 1; 13664ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(pages[i]); 13674ce63fcdSMarek Szyprowski unsigned int len, j; 13684ce63fcdSMarek Szyprowski 13694ce63fcdSMarek Szyprowski for (j = i + 1; j < count; j++, next_pfn++) 13704ce63fcdSMarek Szyprowski if (page_to_pfn(pages[j]) != next_pfn) 13714ce63fcdSMarek Szyprowski break; 13724ce63fcdSMarek Szyprowski 13734ce63fcdSMarek Szyprowski len = (j - i) << PAGE_SHIFT; 1374c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, 13757d2822dfSSricharan R __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs)); 13764ce63fcdSMarek Szyprowski if (ret < 0) 13774ce63fcdSMarek Szyprowski goto fail; 13784ce63fcdSMarek Szyprowski iova += len; 13794ce63fcdSMarek Szyprowski i = j; 13804ce63fcdSMarek Szyprowski } 13814ce63fcdSMarek Szyprowski return dma_addr; 13824ce63fcdSMarek Szyprowski fail: 13834ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); 13844ce63fcdSMarek Szyprowski __free_iova(mapping, dma_addr, size); 138572fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 13864ce63fcdSMarek Szyprowski } 13874ce63fcdSMarek Szyprowski 13884ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 13894ce63fcdSMarek Szyprowski { 139089cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13914ce63fcdSMarek Szyprowski 13924ce63fcdSMarek Szyprowski /* 13934ce63fcdSMarek Szyprowski * add optional in-page offset from iova to size and align 13944ce63fcdSMarek Szyprowski * result to page size 13954ce63fcdSMarek Szyprowski */ 13964ce63fcdSMarek Szyprowski size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 13974ce63fcdSMarek Szyprowski iova &= PAGE_MASK; 13984ce63fcdSMarek Szyprowski 13994ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, size); 14004ce63fcdSMarek Szyprowski __free_iova(mapping, iova, size); 14014ce63fcdSMarek Szyprowski return 0; 14024ce63fcdSMarek Szyprowski } 14034ce63fcdSMarek Szyprowski 1404665bad7bSHiroshi Doyu static struct page **__atomic_get_pages(void *addr) 1405665bad7bSHiroshi Doyu { 140636d0fd21SLaura Abbott struct page *page; 140736d0fd21SLaura Abbott phys_addr_t phys; 1408665bad7bSHiroshi Doyu 140936d0fd21SLaura Abbott phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr); 141036d0fd21SLaura Abbott page = phys_to_page(phys); 141136d0fd21SLaura Abbott 141236d0fd21SLaura Abbott return (struct page **)page; 1413665bad7bSHiroshi Doyu } 1414665bad7bSHiroshi Doyu 141500085f1eSKrzysztof Kozlowski static struct page **__iommu_get_pages(void 
*cpu_addr, unsigned long attrs) 1416e9da6e99SMarek Szyprowski { 1417665bad7bSHiroshi Doyu if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1418665bad7bSHiroshi Doyu return __atomic_get_pages(cpu_addr); 1419665bad7bSHiroshi Doyu 142000085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 1421955c757eSMarek Szyprowski return cpu_addr; 1422955c757eSMarek Szyprowski 14235cf45379SChristoph Hellwig return dma_common_find_pages(cpu_addr); 1424e9da6e99SMarek Szyprowski } 1425e9da6e99SMarek Szyprowski 142656506822SGregory CLEMENT static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, 14277d2822dfSSricharan R dma_addr_t *handle, int coherent_flag, 14287d2822dfSSricharan R unsigned long attrs) 1429479ed93aSHiroshi Doyu { 1430479ed93aSHiroshi Doyu struct page *page; 1431479ed93aSHiroshi Doyu void *addr; 1432479ed93aSHiroshi Doyu 143356506822SGregory CLEMENT if (coherent_flag == COHERENT) 143456506822SGregory CLEMENT addr = __alloc_simple_buffer(dev, size, gfp, &page); 143556506822SGregory CLEMENT else 1436479ed93aSHiroshi Doyu addr = __alloc_from_pool(size, &page); 1437479ed93aSHiroshi Doyu if (!addr) 1438479ed93aSHiroshi Doyu return NULL; 1439479ed93aSHiroshi Doyu 14407d2822dfSSricharan R *handle = __iommu_create_mapping(dev, &page, size, attrs); 144172fd97bfSChristoph Hellwig if (*handle == DMA_MAPPING_ERROR) 1442479ed93aSHiroshi Doyu goto err_mapping; 1443479ed93aSHiroshi Doyu 1444479ed93aSHiroshi Doyu return addr; 1445479ed93aSHiroshi Doyu 1446479ed93aSHiroshi Doyu err_mapping: 1447479ed93aSHiroshi Doyu __free_from_pool(addr, size); 1448479ed93aSHiroshi Doyu return NULL; 1449479ed93aSHiroshi Doyu } 1450479ed93aSHiroshi Doyu 1451d5898291SMarek Szyprowski static void __iommu_free_atomic(struct device *dev, void *cpu_addr, 145256506822SGregory CLEMENT dma_addr_t handle, size_t size, int coherent_flag) 1453479ed93aSHiroshi Doyu { 1454479ed93aSHiroshi Doyu __iommu_remove_mapping(dev, handle, size); 145556506822SGregory CLEMENT if (coherent_flag == COHERENT) 145656506822SGregory CLEMENT __dma_free_buffer(virt_to_page(cpu_addr), size); 145756506822SGregory CLEMENT else 1458d5898291SMarek Szyprowski __free_from_pool(cpu_addr, size); 1459479ed93aSHiroshi Doyu } 1460479ed93aSHiroshi Doyu 146156506822SGregory CLEMENT static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, 146200085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs, 146356506822SGregory CLEMENT int coherent_flag) 14644ce63fcdSMarek Szyprowski { 146571b55663SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 14664ce63fcdSMarek Szyprowski struct page **pages; 14674ce63fcdSMarek Szyprowski void *addr = NULL; 14684ce63fcdSMarek Szyprowski 146972fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 14704ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 14714ce63fcdSMarek Szyprowski 147256506822SGregory CLEMENT if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) 147356506822SGregory CLEMENT return __iommu_alloc_simple(dev, size, gfp, handle, 14747d2822dfSSricharan R coherent_flag, attrs); 1475479ed93aSHiroshi Doyu 14765b91a98cSRichard Zhao /* 14775b91a98cSRichard Zhao * Following is a work-around (a.k.a. hack) to prevent pages 14785b91a98cSRichard Zhao * with __GFP_COMP being passed to split_page() which cannot 14795b91a98cSRichard Zhao * handle them. The real problem is that this flag probably 14805b91a98cSRichard Zhao * should be 0 on ARM as it is not supported on this 14815b91a98cSRichard Zhao * platform; see CONFIG_HUGETLBFS. 
14825b91a98cSRichard Zhao */ 14835b91a98cSRichard Zhao gfp &= ~(__GFP_COMP); 14845b91a98cSRichard Zhao 148556506822SGregory CLEMENT pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag); 14864ce63fcdSMarek Szyprowski if (!pages) 14874ce63fcdSMarek Szyprowski return NULL; 14884ce63fcdSMarek Szyprowski 14897d2822dfSSricharan R *handle = __iommu_create_mapping(dev, pages, size, attrs); 149072fd97bfSChristoph Hellwig if (*handle == DMA_MAPPING_ERROR) 14914ce63fcdSMarek Szyprowski goto err_buffer; 14924ce63fcdSMarek Szyprowski 149300085f1eSKrzysztof Kozlowski if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 1494955c757eSMarek Szyprowski return pages; 1495955c757eSMarek Szyprowski 149678406ff5SChristoph Hellwig addr = dma_common_pages_remap(pages, size, prot, 1497e9da6e99SMarek Szyprowski __builtin_return_address(0)); 14984ce63fcdSMarek Szyprowski if (!addr) 14994ce63fcdSMarek Szyprowski goto err_mapping; 15004ce63fcdSMarek Szyprowski 15014ce63fcdSMarek Szyprowski return addr; 15024ce63fcdSMarek Szyprowski 15034ce63fcdSMarek Szyprowski err_mapping: 15044ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, *handle, size); 15054ce63fcdSMarek Szyprowski err_buffer: 1506549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 15074ce63fcdSMarek Szyprowski return NULL; 15084ce63fcdSMarek Szyprowski } 15094ce63fcdSMarek Szyprowski 151056506822SGregory CLEMENT static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 151100085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 151256506822SGregory CLEMENT { 151356506822SGregory CLEMENT return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); 151456506822SGregory CLEMENT } 151556506822SGregory CLEMENT 151656506822SGregory CLEMENT static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, 151700085f1eSKrzysztof Kozlowski dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 151856506822SGregory CLEMENT { 151956506822SGregory CLEMENT return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); 152056506822SGregory CLEMENT } 152156506822SGregory CLEMENT 152256506822SGregory CLEMENT static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 15234ce63fcdSMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 152400085f1eSKrzysztof Kozlowski unsigned long attrs) 15254ce63fcdSMarek Szyprowski { 1526955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1527371f0f08SMarek Szyprowski unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 15286248461dSSouptick Joarder int err; 1529e9da6e99SMarek Szyprowski 1530e9da6e99SMarek Szyprowski if (!pages) 1531e9da6e99SMarek Szyprowski return -ENXIO; 15324ce63fcdSMarek Szyprowski 15336248461dSSouptick Joarder if (vma->vm_pgoff >= nr_pages) 1534371f0f08SMarek Szyprowski return -ENXIO; 1535371f0f08SMarek Szyprowski 15366248461dSSouptick Joarder err = vm_map_pages(vma, pages, nr_pages); 15376248461dSSouptick Joarder if (err) 15386248461dSSouptick Joarder pr_err("Remapping memory failed: %d\n", err); 15397e312103SMarek Szyprowski 15406248461dSSouptick Joarder return err; 15414ce63fcdSMarek Szyprowski } 154256506822SGregory CLEMENT static int arm_iommu_mmap_attrs(struct device *dev, 154356506822SGregory CLEMENT struct vm_area_struct *vma, void *cpu_addr, 154400085f1eSKrzysztof Kozlowski dma_addr_t dma_addr, size_t size, unsigned long attrs) 154556506822SGregory CLEMENT { 154656506822SGregory CLEMENT vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 
154756506822SGregory CLEMENT 154856506822SGregory CLEMENT return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 154956506822SGregory CLEMENT } 155056506822SGregory CLEMENT 155156506822SGregory CLEMENT static int arm_coherent_iommu_mmap_attrs(struct device *dev, 155256506822SGregory CLEMENT struct vm_area_struct *vma, void *cpu_addr, 155300085f1eSKrzysztof Kozlowski dma_addr_t dma_addr, size_t size, unsigned long attrs) 155456506822SGregory CLEMENT { 155556506822SGregory CLEMENT return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 155656506822SGregory CLEMENT } 15574ce63fcdSMarek Szyprowski 15584ce63fcdSMarek Szyprowski /* 15594ce63fcdSMarek Szyprowski * free a page as defined by the above mapping. 15604ce63fcdSMarek Szyprowski * Must not be called with IRQs disabled. 15614ce63fcdSMarek Szyprowski */ 156256506822SGregory CLEMENT void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 156300085f1eSKrzysztof Kozlowski dma_addr_t handle, unsigned long attrs, int coherent_flag) 15644ce63fcdSMarek Szyprowski { 1565836bfa0dSYoungJun Cho struct page **pages; 15664ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 15674ce63fcdSMarek Szyprowski 156856506822SGregory CLEMENT if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { 156956506822SGregory CLEMENT __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); 1570479ed93aSHiroshi Doyu return; 1571479ed93aSHiroshi Doyu } 1572479ed93aSHiroshi Doyu 1573836bfa0dSYoungJun Cho pages = __iommu_get_pages(cpu_addr, attrs); 1574836bfa0dSYoungJun Cho if (!pages) { 1575836bfa0dSYoungJun Cho WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1576836bfa0dSYoungJun Cho return; 1577836bfa0dSYoungJun Cho } 1578836bfa0dSYoungJun Cho 1579fe9041c2SChristoph Hellwig if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) 158051231740SChristoph Hellwig dma_common_free_remap(cpu_addr, size); 1581e9da6e99SMarek Szyprowski 15824ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size); 1583549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 15844ce63fcdSMarek Szyprowski } 15854ce63fcdSMarek Szyprowski 158656506822SGregory CLEMENT void arm_iommu_free_attrs(struct device *dev, size_t size, 158700085f1eSKrzysztof Kozlowski void *cpu_addr, dma_addr_t handle, unsigned long attrs) 158856506822SGregory CLEMENT { 158956506822SGregory CLEMENT __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); 159056506822SGregory CLEMENT } 159156506822SGregory CLEMENT 159256506822SGregory CLEMENT void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, 159300085f1eSKrzysztof Kozlowski void *cpu_addr, dma_addr_t handle, unsigned long attrs) 159456506822SGregory CLEMENT { 159556506822SGregory CLEMENT __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); 159656506822SGregory CLEMENT } 159756506822SGregory CLEMENT 1598dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 1599dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, 160000085f1eSKrzysztof Kozlowski size_t size, unsigned long attrs) 1601dc2832e1SMarek Szyprowski { 1602dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1603dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1604dc2832e1SMarek Szyprowski 1605dc2832e1SMarek Szyprowski if (!pages) 1606dc2832e1SMarek Szyprowski return -ENXIO; 1607dc2832e1SMarek Szyprowski 1608dc2832e1SMarek Szyprowski return 
sg_alloc_table_from_pages(sgt, pages, count, 0, size, 1609dc2832e1SMarek Szyprowski GFP_KERNEL); 16104ce63fcdSMarek Szyprowski } 16114ce63fcdSMarek Szyprowski 16124ce63fcdSMarek Szyprowski /* 16134ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 16144ce63fcdSMarek Szyprowski */ 16154ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 16164ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 161700085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs, 16180fa478dfSRob Herring bool is_coherent) 16194ce63fcdSMarek Szyprowski { 162089cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 16214ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 16224ce63fcdSMarek Szyprowski int ret = 0; 16234ce63fcdSMarek Szyprowski unsigned int count; 16244ce63fcdSMarek Szyprowski struct scatterlist *s; 1625c9b24996SAndreas Herrmann int prot; 16264ce63fcdSMarek Szyprowski 16274ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 162872fd97bfSChristoph Hellwig *handle = DMA_MAPPING_ERROR; 16294ce63fcdSMarek Szyprowski 16304ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 163172fd97bfSChristoph Hellwig if (iova == DMA_MAPPING_ERROR) 16324ce63fcdSMarek Szyprowski return -ENOMEM; 16334ce63fcdSMarek Szyprowski 16344ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 16353e6110fdSDan Williams phys_addr_t phys = page_to_phys(sg_page(s)); 16364ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 16374ce63fcdSMarek Szyprowski 163800085f1eSKrzysztof Kozlowski if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 16394ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 16404ce63fcdSMarek Szyprowski 16417d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs); 1642c9b24996SAndreas Herrmann 1643c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, prot); 16444ce63fcdSMarek Szyprowski if (ret < 0) 16454ce63fcdSMarek Szyprowski goto fail; 16464ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 16474ce63fcdSMarek Szyprowski iova += len; 16484ce63fcdSMarek Szyprowski } 16494ce63fcdSMarek Szyprowski *handle = iova_base; 16504ce63fcdSMarek Szyprowski 16514ce63fcdSMarek Szyprowski return 0; 16524ce63fcdSMarek Szyprowski fail: 16534ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 16544ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 16554ce63fcdSMarek Szyprowski return ret; 16564ce63fcdSMarek Szyprowski } 16574ce63fcdSMarek Szyprowski 16580fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 165900085f1eSKrzysztof Kozlowski enum dma_data_direction dir, unsigned long attrs, 16600fa478dfSRob Herring bool is_coherent) 16614ce63fcdSMarek Szyprowski { 16624ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 16634ce63fcdSMarek Szyprowski int i, count = 0; 16644ce63fcdSMarek Szyprowski unsigned int offset = s->offset; 16654ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 16664ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 16674ce63fcdSMarek Szyprowski 16684ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 16694ce63fcdSMarek Szyprowski s = sg_next(s); 16704ce63fcdSMarek Szyprowski 167172fd97bfSChristoph Hellwig s->dma_address = DMA_MAPPING_ERROR; 16724ce63fcdSMarek 
Szyprowski s->dma_length = 0; 16734ce63fcdSMarek Szyprowski 16744ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 16754ce63fcdSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, 16760fa478dfSRob Herring dir, attrs, is_coherent) < 0) 16774ce63fcdSMarek Szyprowski goto bad_mapping; 16784ce63fcdSMarek Szyprowski 16794ce63fcdSMarek Szyprowski dma->dma_address += offset; 16804ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 16814ce63fcdSMarek Szyprowski 16824ce63fcdSMarek Szyprowski size = offset = s->offset; 16834ce63fcdSMarek Szyprowski start = s; 16844ce63fcdSMarek Szyprowski dma = sg_next(dma); 16854ce63fcdSMarek Szyprowski count += 1; 16864ce63fcdSMarek Szyprowski } 16874ce63fcdSMarek Szyprowski size += s->length; 16884ce63fcdSMarek Szyprowski } 16890fa478dfSRob Herring if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 16900fa478dfSRob Herring is_coherent) < 0) 16914ce63fcdSMarek Szyprowski goto bad_mapping; 16924ce63fcdSMarek Szyprowski 16934ce63fcdSMarek Szyprowski dma->dma_address += offset; 16944ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 16954ce63fcdSMarek Szyprowski 16964ce63fcdSMarek Szyprowski return count+1; 16974ce63fcdSMarek Szyprowski 16984ce63fcdSMarek Szyprowski bad_mapping: 16994ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 17004ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 17014ce63fcdSMarek Szyprowski return 0; 17024ce63fcdSMarek Szyprowski } 17034ce63fcdSMarek Szyprowski 17044ce63fcdSMarek Szyprowski /** 17050fa478dfSRob Herring * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA 17060fa478dfSRob Herring * @dev: valid struct device pointer 17070fa478dfSRob Herring * @sg: list of buffers 17080fa478dfSRob Herring * @nents: number of buffers to map 17090fa478dfSRob Herring * @dir: DMA transfer direction 17100fa478dfSRob Herring * 17110fa478dfSRob Herring * Map a set of i/o coherent buffers described by scatterlist in streaming 17120fa478dfSRob Herring * mode for DMA. The scatter gather list elements are merged together (if 17130fa478dfSRob Herring * possible) and tagged with the appropriate dma address and length. They are 17140fa478dfSRob Herring * obtained via sg_dma_{address,length}. 17150fa478dfSRob Herring */ 17160fa478dfSRob Herring int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 171700085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, unsigned long attrs) 17180fa478dfSRob Herring { 17190fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 17200fa478dfSRob Herring } 17210fa478dfSRob Herring 17220fa478dfSRob Herring /** 17230fa478dfSRob Herring * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 17240fa478dfSRob Herring * @dev: valid struct device pointer 17250fa478dfSRob Herring * @sg: list of buffers 17260fa478dfSRob Herring * @nents: number of buffers to map 17270fa478dfSRob Herring * @dir: DMA transfer direction 17280fa478dfSRob Herring * 17290fa478dfSRob Herring * Map a set of buffers described by scatterlist in streaming mode for DMA. 17300fa478dfSRob Herring * The scatter gather list elements are merged together (if possible) and 17310fa478dfSRob Herring * tagged with the appropriate dma address and length. They are obtained via 17320fa478dfSRob Herring * sg_dma_{address,length}. 
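 *
 * Illustrative sketch (not part of this file): a driver normally reaches
 * this through dma_map_sg() and then walks the merged segments using the
 * returned count, which may be smaller than nents:
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_segment(sg_dma_address(s), sg_dma_len(s));
 *
 * where program_hw_segment() is a stand-in for device-specific setup and
 * sgl/s/i are the caller's scatterlist and iterators.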
17330fa478dfSRob Herring */ 17340fa478dfSRob Herring int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 173500085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, unsigned long attrs) 17360fa478dfSRob Herring { 17370fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 17380fa478dfSRob Herring } 17390fa478dfSRob Herring 17400fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 174100085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, 174200085f1eSKrzysztof Kozlowski unsigned long attrs, bool is_coherent) 17430fa478dfSRob Herring { 17440fa478dfSRob Herring struct scatterlist *s; 17450fa478dfSRob Herring int i; 17460fa478dfSRob Herring 17470fa478dfSRob Herring for_each_sg(sg, s, nents, i) { 17480fa478dfSRob Herring if (sg_dma_len(s)) 17490fa478dfSRob Herring __iommu_remove_mapping(dev, sg_dma_address(s), 17500fa478dfSRob Herring sg_dma_len(s)); 175100085f1eSKrzysztof Kozlowski if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 17520fa478dfSRob Herring __dma_page_dev_to_cpu(sg_page(s), s->offset, 17530fa478dfSRob Herring s->length, dir); 17540fa478dfSRob Herring } 17550fa478dfSRob Herring } 17560fa478dfSRob Herring 17570fa478dfSRob Herring /** 17580fa478dfSRob Herring * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 17590fa478dfSRob Herring * @dev: valid struct device pointer 17600fa478dfSRob Herring * @sg: list of buffers 17610fa478dfSRob Herring * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 17620fa478dfSRob Herring * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17630fa478dfSRob Herring * 17640fa478dfSRob Herring * Unmap a set of streaming mode DMA translations. Again, CPU access 17650fa478dfSRob Herring * rules concerning calls here are the same as for dma_unmap_single(). 17660fa478dfSRob Herring */ 17670fa478dfSRob Herring void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 176800085f1eSKrzysztof Kozlowski int nents, enum dma_data_direction dir, 176900085f1eSKrzysztof Kozlowski unsigned long attrs) 17700fa478dfSRob Herring { 17710fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 17720fa478dfSRob Herring } 17730fa478dfSRob Herring 17740fa478dfSRob Herring /** 17754ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 17764ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17774ce63fcdSMarek Szyprowski * @sg: list of buffers 17784ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 17794ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17804ce63fcdSMarek Szyprowski * 17814ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 17824ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 
17834ce63fcdSMarek Szyprowski */ 17844ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 178500085f1eSKrzysztof Kozlowski enum dma_data_direction dir, 178600085f1eSKrzysztof Kozlowski unsigned long attrs) 17874ce63fcdSMarek Szyprowski { 17880fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 17894ce63fcdSMarek Szyprowski } 17904ce63fcdSMarek Szyprowski 17914ce63fcdSMarek Szyprowski /** 17924ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 17934ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17944ce63fcdSMarek Szyprowski * @sg: list of buffers 17954ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 17964ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17974ce63fcdSMarek Szyprowski */ 17984ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 17994ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 18004ce63fcdSMarek Szyprowski { 18014ce63fcdSMarek Szyprowski struct scatterlist *s; 18024ce63fcdSMarek Szyprowski int i; 18034ce63fcdSMarek Szyprowski 18044ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 18054ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 18064ce63fcdSMarek Szyprowski 18074ce63fcdSMarek Szyprowski } 18084ce63fcdSMarek Szyprowski 18094ce63fcdSMarek Szyprowski /** 18104ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 18114ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18124ce63fcdSMarek Szyprowski * @sg: list of buffers 18134ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 18144ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 18154ce63fcdSMarek Szyprowski */ 18164ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 18174ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 18184ce63fcdSMarek Szyprowski { 18194ce63fcdSMarek Szyprowski struct scatterlist *s; 18204ce63fcdSMarek Szyprowski int i; 18214ce63fcdSMarek Szyprowski 18224ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 18234ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 18244ce63fcdSMarek Szyprowski } 18254ce63fcdSMarek Szyprowski 18264ce63fcdSMarek Szyprowski 18274ce63fcdSMarek Szyprowski /** 18280fa478dfSRob Herring * arm_coherent_iommu_map_page 18290fa478dfSRob Herring * @dev: valid struct device pointer 18300fa478dfSRob Herring * @page: page that buffer resides in 18310fa478dfSRob Herring * @offset: offset into page for start of buffer 18320fa478dfSRob Herring * @size: size of buffer to map 18330fa478dfSRob Herring * @dir: DMA transfer direction 18340fa478dfSRob Herring * 18350fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_map_page() 18360fa478dfSRob Herring */ 18370fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 18380fa478dfSRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 183900085f1eSKrzysztof Kozlowski unsigned long attrs) 18400fa478dfSRob Herring { 184189cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18420fa478dfSRob Herring dma_addr_t dma_addr; 184313987d68SWill Deacon int ret, prot, len = PAGE_ALIGN(size + offset); 18440fa478dfSRob Herring 18450fa478dfSRob Herring dma_addr = __alloc_iova(mapping, len); 
184672fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 18470fa478dfSRob Herring return dma_addr; 18480fa478dfSRob Herring 18497d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs); 185013987d68SWill Deacon 185113987d68SWill Deacon ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 18520fa478dfSRob Herring if (ret < 0) 18530fa478dfSRob Herring goto fail; 18540fa478dfSRob Herring 18550fa478dfSRob Herring return dma_addr + offset; 18560fa478dfSRob Herring fail: 18570fa478dfSRob Herring __free_iova(mapping, dma_addr, len); 185872fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 18590fa478dfSRob Herring } 18600fa478dfSRob Herring 18610fa478dfSRob Herring /** 18624ce63fcdSMarek Szyprowski * arm_iommu_map_page 18634ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18644ce63fcdSMarek Szyprowski * @page: page that buffer resides in 18654ce63fcdSMarek Szyprowski * @offset: offset into page for start of buffer 18664ce63fcdSMarek Szyprowski * @size: size of buffer to map 18674ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 18684ce63fcdSMarek Szyprowski * 18694ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_map_page() 18704ce63fcdSMarek Szyprowski */ 18714ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 18724ce63fcdSMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 187300085f1eSKrzysztof Kozlowski unsigned long attrs) 18744ce63fcdSMarek Szyprowski { 187500085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 18764ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 18774ce63fcdSMarek Szyprowski 18780fa478dfSRob Herring return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); 18790fa478dfSRob Herring } 18804ce63fcdSMarek Szyprowski 18810fa478dfSRob Herring /** 18820fa478dfSRob Herring * arm_coherent_iommu_unmap_page 18830fa478dfSRob Herring * @dev: valid struct device pointer 18840fa478dfSRob Herring * @handle: DMA address of buffer 18850fa478dfSRob Herring * @size: size of buffer (same as passed to dma_map_page) 18860fa478dfSRob Herring * @dir: DMA transfer direction (same as passed to dma_map_page) 18870fa478dfSRob Herring * 18880fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_unmap_page() 18890fa478dfSRob Herring */ 18900fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 189100085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 18920fa478dfSRob Herring { 189389cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18940fa478dfSRob Herring dma_addr_t iova = handle & PAGE_MASK; 18950fa478dfSRob Herring int offset = handle & ~PAGE_MASK; 18960fa478dfSRob Herring int len = PAGE_ALIGN(size + offset); 18974ce63fcdSMarek Szyprowski 18980fa478dfSRob Herring if (!iova) 18990fa478dfSRob Herring return; 19000fa478dfSRob Herring 19010fa478dfSRob Herring iommu_unmap(mapping->domain, iova, len); 19020fa478dfSRob Herring __free_iova(mapping, iova, len); 19034ce63fcdSMarek Szyprowski } 19044ce63fcdSMarek Szyprowski 19054ce63fcdSMarek Szyprowski /** 19064ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 19074ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 19084ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 19094ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 19104ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed 
to dma_map_page) 19114ce63fcdSMarek Szyprowski * 19124ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 19134ce63fcdSMarek Szyprowski */ 19144ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 191500085f1eSKrzysztof Kozlowski size_t size, enum dma_data_direction dir, unsigned long attrs) 19164ce63fcdSMarek Szyprowski { 191789cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19184ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19194ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19204ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 19214ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 19224ce63fcdSMarek Szyprowski 19234ce63fcdSMarek Szyprowski if (!iova) 19244ce63fcdSMarek Szyprowski return; 19254ce63fcdSMarek Szyprowski 192600085f1eSKrzysztof Kozlowski if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 19274ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 19284ce63fcdSMarek Szyprowski 19294ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 19304ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 19314ce63fcdSMarek Szyprowski } 19324ce63fcdSMarek Szyprowski 193324ed5d2cSNiklas Söderlund /** 193424ed5d2cSNiklas Söderlund * arm_iommu_map_resource - map a device resource for DMA 193524ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 193624ed5d2cSNiklas Söderlund * @phys_addr: physical address of resource 193724ed5d2cSNiklas Söderlund * @size: size of resource to map 193824ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 193924ed5d2cSNiklas Söderlund */ 194024ed5d2cSNiklas Söderlund static dma_addr_t arm_iommu_map_resource(struct device *dev, 194124ed5d2cSNiklas Söderlund phys_addr_t phys_addr, size_t size, 194224ed5d2cSNiklas Söderlund enum dma_data_direction dir, unsigned long attrs) 194324ed5d2cSNiklas Söderlund { 194424ed5d2cSNiklas Söderlund struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 194524ed5d2cSNiklas Söderlund dma_addr_t dma_addr; 194624ed5d2cSNiklas Söderlund int ret, prot; 194724ed5d2cSNiklas Söderlund phys_addr_t addr = phys_addr & PAGE_MASK; 194824ed5d2cSNiklas Söderlund unsigned int offset = phys_addr & ~PAGE_MASK; 194924ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 195024ed5d2cSNiklas Söderlund 195124ed5d2cSNiklas Söderlund dma_addr = __alloc_iova(mapping, len); 195272fd97bfSChristoph Hellwig if (dma_addr == DMA_MAPPING_ERROR) 195324ed5d2cSNiklas Söderlund return dma_addr; 195424ed5d2cSNiklas Söderlund 19557d2822dfSSricharan R prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; 195624ed5d2cSNiklas Söderlund 195724ed5d2cSNiklas Söderlund ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); 195824ed5d2cSNiklas Söderlund if (ret < 0) 195924ed5d2cSNiklas Söderlund goto fail; 196024ed5d2cSNiklas Söderlund 196124ed5d2cSNiklas Söderlund return dma_addr + offset; 196224ed5d2cSNiklas Söderlund fail: 196324ed5d2cSNiklas Söderlund __free_iova(mapping, dma_addr, len); 196472fd97bfSChristoph Hellwig return DMA_MAPPING_ERROR; 196524ed5d2cSNiklas Söderlund } 196624ed5d2cSNiklas Söderlund 196724ed5d2cSNiklas Söderlund /** 196824ed5d2cSNiklas Söderlund * arm_iommu_unmap_resource - unmap a device DMA resource 196924ed5d2cSNiklas Söderlund * @dev: valid struct device pointer 197024ed5d2cSNiklas Söderlund * @dma_handle: DMA address to resource 197124ed5d2cSNiklas Söderlund * @size: size of resource to map 
197224ed5d2cSNiklas Söderlund * @dir: DMA transfer direction 197324ed5d2cSNiklas Söderlund */ 197424ed5d2cSNiklas Söderlund static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle, 197524ed5d2cSNiklas Söderlund size_t size, enum dma_data_direction dir, 197624ed5d2cSNiklas Söderlund unsigned long attrs) 197724ed5d2cSNiklas Söderlund { 197824ed5d2cSNiklas Söderlund struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 197924ed5d2cSNiklas Söderlund dma_addr_t iova = dma_handle & PAGE_MASK; 198024ed5d2cSNiklas Söderlund unsigned int offset = dma_handle & ~PAGE_MASK; 198124ed5d2cSNiklas Söderlund size_t len = PAGE_ALIGN(size + offset); 198224ed5d2cSNiklas Söderlund 198324ed5d2cSNiklas Söderlund if (!iova) 198424ed5d2cSNiklas Söderlund return; 198524ed5d2cSNiklas Söderlund 198624ed5d2cSNiklas Söderlund iommu_unmap(mapping->domain, iova, len); 198724ed5d2cSNiklas Söderlund __free_iova(mapping, iova, len); 198824ed5d2cSNiklas Söderlund } 198924ed5d2cSNiklas Söderlund 19904ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 19914ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 19924ce63fcdSMarek Szyprowski { 199389cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19944ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19954ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19964ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 19974ce63fcdSMarek Szyprowski 19984ce63fcdSMarek Szyprowski if (!iova) 19994ce63fcdSMarek Szyprowski return; 20004ce63fcdSMarek Szyprowski 20014ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 20024ce63fcdSMarek Szyprowski } 20034ce63fcdSMarek Szyprowski 20044ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 20054ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 20064ce63fcdSMarek Szyprowski { 200789cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 20084ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 20094ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 20104ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 20114ce63fcdSMarek Szyprowski 20124ce63fcdSMarek Szyprowski if (!iova) 20134ce63fcdSMarek Szyprowski return; 20144ce63fcdSMarek Szyprowski 20154ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 20164ce63fcdSMarek Szyprowski } 20174ce63fcdSMarek Szyprowski 20185299709dSBart Van Assche const struct dma_map_ops iommu_ops = { 20194ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 20204ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 20214ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 2022dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 20234ce63fcdSMarek Szyprowski 20244ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 20254ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 20264ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 20274ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device, 20284ce63fcdSMarek Szyprowski 20294ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 20304ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 20314ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 
20324ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 203324ed5d2cSNiklas Söderlund 203424ed5d2cSNiklas Söderlund .map_resource = arm_iommu_map_resource, 203524ed5d2cSNiklas Söderlund .unmap_resource = arm_iommu_unmap_resource, 20369eef8b8cSChristoph Hellwig 2037418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 20384ce63fcdSMarek Szyprowski }; 20394ce63fcdSMarek Szyprowski 20405299709dSBart Van Assche const struct dma_map_ops iommu_coherent_ops = { 204156506822SGregory CLEMENT .alloc = arm_coherent_iommu_alloc_attrs, 204256506822SGregory CLEMENT .free = arm_coherent_iommu_free_attrs, 204356506822SGregory CLEMENT .mmap = arm_coherent_iommu_mmap_attrs, 20440fa478dfSRob Herring .get_sgtable = arm_iommu_get_sgtable, 20450fa478dfSRob Herring 20460fa478dfSRob Herring .map_page = arm_coherent_iommu_map_page, 20470fa478dfSRob Herring .unmap_page = arm_coherent_iommu_unmap_page, 20480fa478dfSRob Herring 20490fa478dfSRob Herring .map_sg = arm_coherent_iommu_map_sg, 20500fa478dfSRob Herring .unmap_sg = arm_coherent_iommu_unmap_sg, 205124ed5d2cSNiklas Söderlund 205224ed5d2cSNiklas Söderlund .map_resource = arm_iommu_map_resource, 205324ed5d2cSNiklas Söderlund .unmap_resource = arm_iommu_unmap_resource, 20549eef8b8cSChristoph Hellwig 2055418a7a7eSChristoph Hellwig .dma_supported = arm_dma_supported, 20560fa478dfSRob Herring }; 20570fa478dfSRob Herring 20584ce63fcdSMarek Szyprowski /** 20594ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 20604ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 20614ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 206268efd7d2SMarek Szyprowski * @size: maximum size of the valid IO address space 20634ce63fcdSMarek Szyprowski * 20644ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 20654ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 20664ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 20674ce63fcdSMarek Szyprowski * 20684ce63fcdSMarek Szyprowski * The client device needs to be attached to the mapping with 20694ce63fcdSMarek Szyprowski * the arm_iommu_attach_device() function.
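 *
 * Illustrative sketch (not part of this file): IOMMU-aware bus code or a
 * driver might create a 128 MiB IO address space at a hypothetical base
 * of 0x10000000 like this:
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x10000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 * See the attach example in the arm_iommu_attach_device() comment below.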
20704ce63fcdSMarek Szyprowski */ 20714ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 20721424532bSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) 20734ce63fcdSMarek Szyprowski { 207468efd7d2SMarek Szyprowski unsigned int bits = size >> PAGE_SHIFT; 207568efd7d2SMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 20764ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 207768efd7d2SMarek Szyprowski int extensions = 1; 20784ce63fcdSMarek Szyprowski int err = -ENOMEM; 20794ce63fcdSMarek Szyprowski 20801424532bSMarek Szyprowski /* currently only 32-bit DMA address space is supported */ 20811424532bSMarek Szyprowski if (size > DMA_BIT_MASK(32) + 1) 20821424532bSMarek Szyprowski return ERR_PTR(-ERANGE); 20831424532bSMarek Szyprowski 208468efd7d2SMarek Szyprowski if (!bitmap_size) 20854ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 20864ce63fcdSMarek Szyprowski 208768efd7d2SMarek Szyprowski if (bitmap_size > PAGE_SIZE) { 208868efd7d2SMarek Szyprowski extensions = bitmap_size / PAGE_SIZE; 208968efd7d2SMarek Szyprowski bitmap_size = PAGE_SIZE; 209068efd7d2SMarek Szyprowski } 209168efd7d2SMarek Szyprowski 20924ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 20934ce63fcdSMarek Szyprowski if (!mapping) 20944ce63fcdSMarek Szyprowski goto err; 20954ce63fcdSMarek Szyprowski 209668efd7d2SMarek Szyprowski mapping->bitmap_size = bitmap_size; 20976396bb22SKees Cook mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), 20984d852ef8SAndreas Herrmann GFP_KERNEL); 20994d852ef8SAndreas Herrmann if (!mapping->bitmaps) 21004ce63fcdSMarek Szyprowski goto err2; 21014ce63fcdSMarek Szyprowski 210268efd7d2SMarek Szyprowski mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); 21034d852ef8SAndreas Herrmann if (!mapping->bitmaps[0]) 21044d852ef8SAndreas Herrmann goto err3; 21054d852ef8SAndreas Herrmann 21064d852ef8SAndreas Herrmann mapping->nr_bitmaps = 1; 21074d852ef8SAndreas Herrmann mapping->extensions = extensions; 21084ce63fcdSMarek Szyprowski mapping->base = base; 210968efd7d2SMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 21104d852ef8SAndreas Herrmann 21114ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 21124ce63fcdSMarek Szyprowski 21134ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 21144ce63fcdSMarek Szyprowski if (!mapping->domain) 21154d852ef8SAndreas Herrmann goto err4; 21164ce63fcdSMarek Szyprowski 21174ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 21184ce63fcdSMarek Szyprowski return mapping; 21194d852ef8SAndreas Herrmann err4: 21204d852ef8SAndreas Herrmann kfree(mapping->bitmaps[0]); 21214ce63fcdSMarek Szyprowski err3: 21224d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 21234ce63fcdSMarek Szyprowski err2: 21244ce63fcdSMarek Szyprowski kfree(mapping); 21254ce63fcdSMarek Szyprowski err: 21264ce63fcdSMarek Szyprowski return ERR_PTR(err); 21274ce63fcdSMarek Szyprowski } 212818177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 21294ce63fcdSMarek Szyprowski 21304ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 21314ce63fcdSMarek Szyprowski { 21324d852ef8SAndreas Herrmann int i; 21334ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 21344ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 21354ce63fcdSMarek Szyprowski 21364ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 21374d852ef8SAndreas Herrmann for (i = 0; i < 
21304ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref)
21314ce63fcdSMarek Szyprowski {
21324d852ef8SAndreas Herrmann 	int i;
21334ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping =
21344ce63fcdSMarek Szyprowski 		container_of(kref, struct dma_iommu_mapping, kref);
21354ce63fcdSMarek Szyprowski 
21364ce63fcdSMarek Szyprowski 	iommu_domain_free(mapping->domain);
21374d852ef8SAndreas Herrmann 	for (i = 0; i < mapping->nr_bitmaps; i++)
21384d852ef8SAndreas Herrmann 		kfree(mapping->bitmaps[i]);
21394d852ef8SAndreas Herrmann 	kfree(mapping->bitmaps);
21404ce63fcdSMarek Szyprowski 	kfree(mapping);
21414ce63fcdSMarek Szyprowski }
21424ce63fcdSMarek Szyprowski 
21434d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
21444d852ef8SAndreas Herrmann {
21454d852ef8SAndreas Herrmann 	int next_bitmap;
21464d852ef8SAndreas Herrmann 
2147462859aaSMarek Szyprowski 	if (mapping->nr_bitmaps >= mapping->extensions)
21484d852ef8SAndreas Herrmann 		return -EINVAL;
21494d852ef8SAndreas Herrmann 
21504d852ef8SAndreas Herrmann 	next_bitmap = mapping->nr_bitmaps;
21514d852ef8SAndreas Herrmann 	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
21524d852ef8SAndreas Herrmann 						GFP_ATOMIC);
21534d852ef8SAndreas Herrmann 	if (!mapping->bitmaps[next_bitmap])
21544d852ef8SAndreas Herrmann 		return -ENOMEM;
21554d852ef8SAndreas Herrmann 
21564d852ef8SAndreas Herrmann 	mapping->nr_bitmaps++;
21574d852ef8SAndreas Herrmann 
21584d852ef8SAndreas Herrmann 	return 0;
21594d852ef8SAndreas Herrmann }
21604d852ef8SAndreas Herrmann 
21614ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
21624ce63fcdSMarek Szyprowski {
21634ce63fcdSMarek Szyprowski 	if (mapping)
21644ce63fcdSMarek Szyprowski 		kref_put(&mapping->kref, release_iommu_mapping);
21654ce63fcdSMarek Szyprowski }
216618177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
21674ce63fcdSMarek Szyprowski 
2168eab8d653SLaurent Pinchart static int __arm_iommu_attach_device(struct device *dev,
21694ce63fcdSMarek Szyprowski 				     struct dma_iommu_mapping *mapping)
21704ce63fcdSMarek Szyprowski {
21714ce63fcdSMarek Szyprowski 	int err;
21724ce63fcdSMarek Szyprowski 
21734ce63fcdSMarek Szyprowski 	err = iommu_attach_device(mapping->domain, dev);
21744ce63fcdSMarek Szyprowski 	if (err)
21754ce63fcdSMarek Szyprowski 		return err;
21764ce63fcdSMarek Szyprowski 
21774ce63fcdSMarek Szyprowski 	kref_get(&mapping->kref);
217889cfdb19SWill Deacon 	to_dma_iommu_mapping(dev) = mapping;
21794ce63fcdSMarek Szyprowski 
218075c59716SHiroshi Doyu 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
21814ce63fcdSMarek Szyprowski 	return 0;
21824ce63fcdSMarek Szyprowski }
21834ce63fcdSMarek Szyprowski 
21846fe36758SHiroshi Doyu /**
2185eab8d653SLaurent Pinchart  * arm_iommu_attach_device
21866fe36758SHiroshi Doyu  * @dev: valid struct device pointer
2187eab8d653SLaurent Pinchart  * @mapping: io address space mapping structure (returned from
2188eab8d653SLaurent Pinchart  *	arm_iommu_create_mapping)
21896fe36758SHiroshi Doyu  *
2190eab8d653SLaurent Pinchart  * Attaches the specified io address space mapping to the provided device.
2191eab8d653SLaurent Pinchart  * This replaces the dma operations (dma_map_ops pointer) with the
2192eab8d653SLaurent Pinchart  * IOMMU-aware version.
2193eab8d653SLaurent Pinchart  *
2194eab8d653SLaurent Pinchart  * More than one client might be attached to the same io address space
2195eab8d653SLaurent Pinchart  * mapping.
21966fe36758SHiroshi Doyu  */
2197eab8d653SLaurent Pinchart int arm_iommu_attach_device(struct device *dev,
2198eab8d653SLaurent Pinchart 			    struct dma_iommu_mapping *mapping)
2199eab8d653SLaurent Pinchart {
2200eab8d653SLaurent Pinchart 	int err;
2201eab8d653SLaurent Pinchart 
2202eab8d653SLaurent Pinchart 	err = __arm_iommu_attach_device(dev, mapping);
2203eab8d653SLaurent Pinchart 	if (err)
2204eab8d653SLaurent Pinchart 		return err;
2205eab8d653SLaurent Pinchart 
2206eab8d653SLaurent Pinchart 	set_dma_ops(dev, &iommu_ops);
2207eab8d653SLaurent Pinchart 	return 0;
2208eab8d653SLaurent Pinchart }
2209eab8d653SLaurent Pinchart EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
2210eab8d653SLaurent Pinchart 
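/*
 * Editor's illustration (not part of the original source): a minimal sketch
 * of how a client driver might use the API above, assuming made-up names
 * (example_probe/example_remove, example_mapping) and an arbitrary 256 MiB
 * IO window starting at IOVA 0x80000000.
 */
#ifdef EDITOR_USAGE_EXAMPLE
static struct dma_iommu_mapping *example_mapping;

static int example_probe(struct device *dev)
{
	int err;

	/* One mapping may be shared by several client devices on the bus. */
	example_mapping = arm_iommu_create_mapping(dev->bus, 0x80000000,
						   SZ_256M);
	if (IS_ERR(example_mapping))
		return PTR_ERR(example_mapping);

	/* Switch the device over to the IOMMU-aware dma_map_ops. */
	err = arm_iommu_attach_device(dev, example_mapping);
	if (err) {
		arm_iommu_release_mapping(example_mapping);
		return err;
	}

	return 0;
}

static void example_remove(struct device *dev)
{
	/* Restore the non-IOMMU ops and drop the mapping reference. */
	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(example_mapping);
}
#endif /* EDITOR_USAGE_EXAMPLE */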
2211d3e01c51SSricharan R /**
2212d3e01c51SSricharan R  * arm_iommu_detach_device
2213d3e01c51SSricharan R  * @dev: valid struct device pointer
2214d3e01c51SSricharan R  *
2215d3e01c51SSricharan R  * Detaches the provided device from a previously attached mapping.
22164a4d68fcSWolfram Sang (Renesas)  * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
2217d3e01c51SSricharan R  */
2218d3e01c51SSricharan R void arm_iommu_detach_device(struct device *dev)
22196fe36758SHiroshi Doyu {
22206fe36758SHiroshi Doyu 	struct dma_iommu_mapping *mapping;
22216fe36758SHiroshi Doyu 
22226fe36758SHiroshi Doyu 	mapping = to_dma_iommu_mapping(dev);
22236fe36758SHiroshi Doyu 	if (!mapping) {
22246fe36758SHiroshi Doyu 		dev_warn(dev, "Not attached\n");
22256fe36758SHiroshi Doyu 		return;
22266fe36758SHiroshi Doyu 	}
22276fe36758SHiroshi Doyu 
22286fe36758SHiroshi Doyu 	iommu_detach_device(mapping->domain, dev);
22296fe36758SHiroshi Doyu 	kref_put(&mapping->kref, release_iommu_mapping);
223089cfdb19SWill Deacon 	to_dma_iommu_mapping(dev) = NULL;
22311874619aSThierry Reding 	set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
22326fe36758SHiroshi Doyu 
22336fe36758SHiroshi Doyu 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
22346fe36758SHiroshi Doyu }
223518177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
22366fe36758SHiroshi Doyu 
22375299709dSBart Van Assche static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
22384bb25789SWill Deacon {
22394bb25789SWill Deacon 	return coherent ? &iommu_coherent_ops : &iommu_ops;
22404bb25789SWill Deacon }
22414bb25789SWill Deacon 
22424bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
224353c92d79SRobin Murphy 				    const struct iommu_ops *iommu)
22444bb25789SWill Deacon {
22454bb25789SWill Deacon 	struct dma_iommu_mapping *mapping;
22464bb25789SWill Deacon 
22474bb25789SWill Deacon 	if (!iommu)
22484bb25789SWill Deacon 		return false;
22494bb25789SWill Deacon 
22504bb25789SWill Deacon 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
22514bb25789SWill Deacon 	if (IS_ERR(mapping)) {
22524bb25789SWill Deacon 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
22534bb25789SWill Deacon 			size, dev_name(dev));
22544bb25789SWill Deacon 		return false;
22554bb25789SWill Deacon 	}
22564bb25789SWill Deacon 
2257eab8d653SLaurent Pinchart 	if (__arm_iommu_attach_device(dev, mapping)) {
22584bb25789SWill Deacon 		pr_warn("Failed to attach device %s to IOMMU mapping\n",
22594bb25789SWill Deacon 			dev_name(dev));
22604bb25789SWill Deacon 		arm_iommu_release_mapping(mapping);
22614bb25789SWill Deacon 		return false;
22624bb25789SWill Deacon 	}
22634bb25789SWill Deacon 
22644bb25789SWill Deacon 	return true;
22654bb25789SWill Deacon }
22664bb25789SWill Deacon 
22674bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev)
22684bb25789SWill Deacon {
226989cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
22704bb25789SWill Deacon 
2271c2273a18SWill Deacon 	if (!mapping)
2272c2273a18SWill Deacon 		return;
2273c2273a18SWill Deacon 
2274d3e01c51SSricharan R 	arm_iommu_detach_device(dev);
22754bb25789SWill Deacon 	arm_iommu_release_mapping(mapping);
22764bb25789SWill Deacon }
22774bb25789SWill Deacon 
22784bb25789SWill Deacon #else
22794bb25789SWill Deacon 
22804bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
228153c92d79SRobin Murphy 				    const struct iommu_ops *iommu)
22824bb25789SWill Deacon {
22834bb25789SWill Deacon 	return false;
22844bb25789SWill Deacon }
22854bb25789SWill Deacon 
22864bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { }
22874bb25789SWill Deacon 
22884bb25789SWill Deacon #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
22894bb25789SWill Deacon 
22904bb25789SWill Deacon #endif /* CONFIG_ARM_DMA_USE_IOMMU */
22914bb25789SWill Deacon 
22924bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
229353c92d79SRobin Murphy 			const struct iommu_ops *iommu, bool coherent)
22944bb25789SWill Deacon {
22955299709dSBart Van Assche 	const struct dma_map_ops *dma_ops;
22964bb25789SWill Deacon 
22976f51ee70SLinus Torvalds 	dev->archdata.dma_coherent = coherent;
2298ad3c7b18SChristoph Hellwig #ifdef CONFIG_SWIOTLB
2299ad3c7b18SChristoph Hellwig 	dev->dma_coherent = coherent;
2300ad3c7b18SChristoph Hellwig #endif
230126b37b94SLaurent Pinchart 
230226b37b94SLaurent Pinchart 	/*
230326b37b94SLaurent Pinchart 	 * Don't override the dma_ops if they have already been set. Ideally
230426b37b94SLaurent Pinchart 	 * this should be the only location where dma_ops are set; remove this
230526b37b94SLaurent Pinchart 	 * check once all other callers of set_dma_ops() have disappeared.
230626b37b94SLaurent Pinchart 	 */
230726b37b94SLaurent Pinchart 	if (dev->dma_ops)
230826b37b94SLaurent Pinchart 		return;
230926b37b94SLaurent Pinchart 
23104bb25789SWill Deacon 	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
23114bb25789SWill Deacon 		dma_ops = arm_get_iommu_dma_map_ops(coherent);
23124bb25789SWill Deacon 	else
23134bb25789SWill Deacon 		dma_ops = arm_get_dma_map_ops(coherent);
23144bb25789SWill Deacon 
23154bb25789SWill Deacon 	set_dma_ops(dev, dma_ops);
2316e0586326SStefano Stabellini 
2317e0586326SStefano Stabellini #ifdef CONFIG_XEN
23188e23c82cSChristoph Hellwig 	if (xen_initial_domain())
23190e0d26e7SChristoph Hellwig 		dev->dma_ops = &xen_swiotlb_dma_ops;
2320e0586326SStefano Stabellini #endif
2321a93a121aSLaurent Pinchart 	dev->archdata.dma_ops_setup = true;
23224bb25789SWill Deacon }
23234bb25789SWill Deacon 
23244bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev)
23254bb25789SWill Deacon {
2326a93a121aSLaurent Pinchart 	if (!dev->archdata.dma_ops_setup)
2327a93a121aSLaurent Pinchart 		return;
2328a93a121aSLaurent Pinchart 
23294bb25789SWill Deacon 	arm_teardown_iommu_dma_ops(dev);
2330fc67e6f1SRobin Murphy 	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2331fc67e6f1SRobin Murphy 	set_dma_ops(dev, NULL);
23324bb25789SWill Deacon }
2333ad3c7b18SChristoph Hellwig 
2334ad3c7b18SChristoph Hellwig #ifdef CONFIG_SWIOTLB
233556e35f9cSChristoph Hellwig void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
233656e35f9cSChristoph Hellwig 		enum dma_data_direction dir)
2337ad3c7b18SChristoph Hellwig {
2338ad3c7b18SChristoph Hellwig 	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2339ad3c7b18SChristoph Hellwig 			      size, dir);
2340ad3c7b18SChristoph Hellwig }
2341ad3c7b18SChristoph Hellwig 
234256e35f9cSChristoph Hellwig void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
234356e35f9cSChristoph Hellwig 		enum dma_data_direction dir)
2344ad3c7b18SChristoph Hellwig {
2345ad3c7b18SChristoph Hellwig 	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
2346ad3c7b18SChristoph Hellwig 			      size, dir);
2347ad3c7b18SChristoph Hellwig }
2348ad3c7b18SChristoph Hellwig 
2349ad3c7b18SChristoph Hellwig void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
2350ad3c7b18SChristoph Hellwig 		gfp_t gfp, unsigned long attrs)
2351ad3c7b18SChristoph Hellwig {
2352ad3c7b18SChristoph Hellwig 	return __dma_alloc(dev, size, dma_handle, gfp,
2353ad3c7b18SChristoph Hellwig 			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
2354ad3c7b18SChristoph Hellwig 			   attrs, __builtin_return_address(0));
2355ad3c7b18SChristoph Hellwig }
2356ad3c7b18SChristoph Hellwig 
2357ad3c7b18SChristoph Hellwig void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
2358ad3c7b18SChristoph Hellwig 		dma_addr_t dma_handle, unsigned long attrs)
2359ad3c7b18SChristoph Hellwig {
2360ad3c7b18SChristoph Hellwig 	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
2361ad3c7b18SChristoph Hellwig }
2362ad3c7b18SChristoph Hellwig #endif /* CONFIG_SWIOTLB */
2363
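/*
 * Editor's illustration (not part of the original source): a driver-side
 * streaming mapping whose cache maintenance lands in the helpers above on a
 * non-coherent device. The function, device and buffer names are made up;
 * only dma_map_single()/dma_unmap_single()/dma_mapping_error() are real API.
 */
#ifdef EDITOR_USAGE_EXAMPLE
static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/*
	 * On a non-coherent device this cleans the CPU cache for the buffer
	 * (ultimately via arch_sync_dma_for_device()) before the transfer.
	 */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the hardware with 'dma' and wait for completion ... */

	/*
	 * For DMA_FROM_DEVICE the unmap is where stale cache lines would be
	 * invalidated, via arch_sync_dma_for_cpu().
	 */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
#endif /* EDITOR_USAGE_EXAMPLE */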