/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);
static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
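
/*
 * Illustrative sketch (not part of this file's API): a driver drives the
 * ownership transitions above through the generic streaming DMA calls,
 * which land in arm_dma_map_page()/arm_dma_unmap_page() below.  "dev" and
 * "page" are assumed to come from the driver's own context:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	(device owns the buffer; the CPU must not touch it)
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 *	(CPU owns the buffer again)
 */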
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
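
/*
 * Note that arm_coherent_dma_ops deliberately omits unmap_page, unmap_sg
 * and all of the sync_* callbacks: for cache-coherent devices the CPU
 * caches never need cleaning or invalidating around a transfer, and the
 * core dma-mapping code treats the missing callbacks as no-ops.
 */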
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
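
/*
 * Worked example with illustrative numbers, assuming a 1:1 bus-to-physical
 * mapping: suppose max_dma_pfn works out to 0xa0000 (512 MiB of DMA-able
 * memory ending at 0xa0000000).  A coherent mask of DMA_BIT_MASK(32)
 * gives dma_to_pfn(dev, mask) == 0xfffff >= max_dma_pfn, so the mask is
 * accepted.  A 24-bit mask gives dma_to_pfn(dev, mask) == 0xfff, which is
 * below max_dma_pfn, so __dma_supported() rejects it with a warning.
 */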
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
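
/*
 * Example (illustrative): a 20 KiB (five page) request gives order 3, so
 * alloc_pages() hands back a 32 KiB block.  split_page() turns it into
 * eight independent order-0 pages, and the loop above returns pages 5..7
 * to the page allocator, leaving exactly the five pages asked for.
 */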
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}
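
/*
 * Example: the pool can be grown from its 256 KiB default on the kernel
 * command line, e.g. "coherent_pool=2M"; memparse() above accepts the
 * usual K/M/G suffixes.  Platform code may instead call
 * init_dma_coherent_pool_size() early, which only takes effect when the
 * command line has not already changed the default.
 */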
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;

	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				(void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;
		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) cannot cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
#define __free_from_pool(cpu_addr, size)			do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};
static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		__dma_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};
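
/*
 * How __dma_alloc() below picks one of the four allocators above (a
 * restatement of its selection logic, not additional behaviour):
 *
 *	device has a CMA area and gfp allows blocking -> cma_allocator
 *	!MMU, or a cache-coherent device              -> simple_allocator
 *	gfp allows blocking                           -> remap_allocator
 *	otherwise (atomic context)                    -> pool_allocator
 */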
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 struct dma_attrs *attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf), gfp);
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_ERROR_CODE;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (nommu() || is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}
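
/*
 * Illustrative driver-side usage (assumed context; "dev" is the device
 * doing DMA).  dma_alloc_coherent() dispatches to arm_dma_alloc() below,
 * which wraps __dma_alloc():
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	(program the device with "dma"; access the buffer through "cpu")
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 */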
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
#endif	/* CONFIG_MMU */
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
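
/*
 * Illustrative sketch of the userspace side (assumed driver code; the
 * foo_* names are hypothetical): a char device can expose a coherent
 * buffer by calling dma_mmap_coherent() from its .mmap file operation,
 * which reaches arm_dma_mmap() above:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */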
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
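
/*
 * In terms of the outer-cache operations visible above and below:
 * mapping for the device invalidates the outer cache for DMA_FROM_DEVICE
 * and cleans it otherwise, while unmapping invalidates it for every
 * direction except DMA_TO_DEVICE (which needs no CPU-side work on
 * completion).  The inner cache is handled by the CPU-specific
 * dmac_map_area()/dmac_unmap_area() helpers passed to
 * dma_cache_maint_page().
 */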
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
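
/*
 * Illustrative scatter-gather usage from a driver (assumed context;
 * "sglist" was built with sg_init_table()/sg_set_page(), and
 * program_hw_descriptor() is a hypothetical stand-in for the
 * device-specific step):
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	(run the transfer)
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */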
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
1138022ae537SRussell King */ 1139022ae537SRussell King int dma_supported(struct device *dev, u64 mask) 1140022ae537SRussell King { 11419f28cde0SRussell King return __dma_supported(dev, mask, false); 1142022ae537SRussell King } 1143022ae537SRussell King EXPORT_SYMBOL(dma_supported); 1144022ae537SRussell King 114587b54e78SGregory CLEMENT int arm_dma_set_mask(struct device *dev, u64 dma_mask) 1146022ae537SRussell King { 1147022ae537SRussell King if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 1148022ae537SRussell King return -EIO; 1149022ae537SRussell King 1150022ae537SRussell King *dev->dma_mask = dma_mask; 1151022ae537SRussell King 1152022ae537SRussell King return 0; 1153022ae537SRussell King } 1154022ae537SRussell King 115524056f52SRussell King #define PREALLOC_DMA_DEBUG_ENTRIES 4096 115624056f52SRussell King 115724056f52SRussell King static int __init dma_debug_do_init(void) 115824056f52SRussell King { 115924056f52SRussell King dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 116024056f52SRussell King return 0; 116124056f52SRussell King } 116224056f52SRussell King fs_initcall(dma_debug_do_init); 11634ce63fcdSMarek Szyprowski 11644ce63fcdSMarek Szyprowski #ifdef CONFIG_ARM_DMA_USE_IOMMU 11654ce63fcdSMarek Szyprowski 11664ce63fcdSMarek Szyprowski /* IOMMU */ 11674ce63fcdSMarek Szyprowski 11684d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping); 11694d852ef8SAndreas Herrmann 11704ce63fcdSMarek Szyprowski static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, 11714ce63fcdSMarek Szyprowski size_t size) 11724ce63fcdSMarek Szyprowski { 11734ce63fcdSMarek Szyprowski unsigned int order = get_order(size); 11744ce63fcdSMarek Szyprowski unsigned int align = 0; 11754ce63fcdSMarek Szyprowski unsigned int count, start; 1176006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 11774ce63fcdSMarek Szyprowski unsigned long flags; 11784d852ef8SAndreas Herrmann dma_addr_t iova; 11794d852ef8SAndreas Herrmann int i; 11804ce63fcdSMarek Szyprowski 118160460abfSSeung-Woo Kim if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT) 118260460abfSSeung-Woo Kim order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT; 118360460abfSSeung-Woo Kim 118468efd7d2SMarek Szyprowski count = PAGE_ALIGN(size) >> PAGE_SHIFT; 118568efd7d2SMarek Szyprowski align = (1 << order) - 1; 11864ce63fcdSMarek Szyprowski 11874ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 11884d852ef8SAndreas Herrmann for (i = 0; i < mapping->nr_bitmaps; i++) { 11894d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11904d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 11914d852ef8SAndreas Herrmann 11924d852ef8SAndreas Herrmann if (start > mapping->bits) 11934d852ef8SAndreas Herrmann continue; 11944d852ef8SAndreas Herrmann 11954d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 11964d852ef8SAndreas Herrmann break; 11974d852ef8SAndreas Herrmann } 11984d852ef8SAndreas Herrmann 11994d852ef8SAndreas Herrmann /* 12004d852ef8SAndreas Herrmann * No unused range found. Try to extend the existing mapping 12014d852ef8SAndreas Herrmann * and perform a second attempt to reserve an IO virtual 12024d852ef8SAndreas Herrmann * address range of size bytes. 
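 *
 * (Worked example, assuming 4 KiB pages and the default one-page
 * bitmap: each bitmap tracks 32768 IOVA pages, so every successful
 * extension grows the usable window by 128 MiB.)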
12034d852ef8SAndreas Herrmann */ 12044d852ef8SAndreas Herrmann if (i == mapping->nr_bitmaps) { 12054d852ef8SAndreas Herrmann if (extend_iommu_mapping(mapping)) { 12064d852ef8SAndreas Herrmann spin_unlock_irqrestore(&mapping->lock, flags); 12074d852ef8SAndreas Herrmann return DMA_ERROR_CODE; 12084d852ef8SAndreas Herrmann } 12094d852ef8SAndreas Herrmann 12104d852ef8SAndreas Herrmann start = bitmap_find_next_zero_area(mapping->bitmaps[i], 12114d852ef8SAndreas Herrmann mapping->bits, 0, count, align); 12124d852ef8SAndreas Herrmann 12134ce63fcdSMarek Szyprowski if (start > mapping->bits) { 12144ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 12154ce63fcdSMarek Szyprowski return DMA_ERROR_CODE; 12164ce63fcdSMarek Szyprowski } 12174ce63fcdSMarek Szyprowski 12184d852ef8SAndreas Herrmann bitmap_set(mapping->bitmaps[i], start, count); 12194d852ef8SAndreas Herrmann } 12204ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 12214ce63fcdSMarek Szyprowski 1222006f841dSRitesh Harjani iova = mapping->base + (mapping_size * i); 122368efd7d2SMarek Szyprowski iova += start << PAGE_SHIFT; 12244d852ef8SAndreas Herrmann 12254d852ef8SAndreas Herrmann return iova; 12264ce63fcdSMarek Szyprowski } 12274ce63fcdSMarek Szyprowski 12284ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping, 12294ce63fcdSMarek Szyprowski dma_addr_t addr, size_t size) 12304ce63fcdSMarek Szyprowski { 12314d852ef8SAndreas Herrmann unsigned int start, count; 1232006f841dSRitesh Harjani size_t mapping_size = mapping->bits << PAGE_SHIFT; 12334ce63fcdSMarek Szyprowski unsigned long flags; 12344d852ef8SAndreas Herrmann dma_addr_t bitmap_base; 12354d852ef8SAndreas Herrmann u32 bitmap_index; 12364d852ef8SAndreas Herrmann 12374d852ef8SAndreas Herrmann if (!size) 12384d852ef8SAndreas Herrmann return; 12394d852ef8SAndreas Herrmann 1240006f841dSRitesh Harjani bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; 12414d852ef8SAndreas Herrmann BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); 12424d852ef8SAndreas Herrmann 1243006f841dSRitesh Harjani bitmap_base = mapping->base + mapping_size * bitmap_index; 12444d852ef8SAndreas Herrmann 124568efd7d2SMarek Szyprowski start = (addr - bitmap_base) >> PAGE_SHIFT; 12464d852ef8SAndreas Herrmann 1247006f841dSRitesh Harjani if (addr + size > bitmap_base + mapping_size) { 12484d852ef8SAndreas Herrmann /* 12494d852ef8SAndreas Herrmann * The address range to be freed reaches into the iova 12504d852ef8SAndreas Herrmann * range of the next bitmap. This should not happen as 12514d852ef8SAndreas Herrmann * we don't allow this in __alloc_iova (at the 12524d852ef8SAndreas Herrmann * moment). 12534d852ef8SAndreas Herrmann */ 12544d852ef8SAndreas Herrmann BUG(); 12554d852ef8SAndreas Herrmann } else 125668efd7d2SMarek Szyprowski count = size >> PAGE_SHIFT; 12574ce63fcdSMarek Szyprowski 12584ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags); 12594d852ef8SAndreas Herrmann bitmap_clear(mapping->bitmaps[bitmap_index], start, count); 12604ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags); 12614ce63fcdSMarek Szyprowski } 12624ce63fcdSMarek Szyprowski 126333298ef6SDoug Anderson /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! 
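 * (Assuming 4 KiB pages, orders {9, 8, 4, 0} give chunk sizes of
 * 2 MiB, 1 MiB, 64 KiB and 4 KiB respectively.)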
*/ 126433298ef6SDoug Anderson static const int iommu_order_array[] = { 9, 8, 4, 0 }; 126533298ef6SDoug Anderson 1266549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, 1267549a17e4SMarek Szyprowski gfp_t gfp, struct dma_attrs *attrs) 12684ce63fcdSMarek Szyprowski { 12694ce63fcdSMarek Szyprowski struct page **pages; 12704ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 12714ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *); 12724ce63fcdSMarek Szyprowski int i = 0; 127333298ef6SDoug Anderson int order_idx = 0; 12744ce63fcdSMarek Szyprowski 12754ce63fcdSMarek Szyprowski if (array_size <= PAGE_SIZE) 127623be7fdaSAlexandre Courbot pages = kzalloc(array_size, GFP_KERNEL); 12774ce63fcdSMarek Szyprowski else 12784ce63fcdSMarek Szyprowski pages = vzalloc(array_size); 12794ce63fcdSMarek Szyprowski if (!pages) 12804ce63fcdSMarek Szyprowski return NULL; 12814ce63fcdSMarek Szyprowski 1282549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) 1283549a17e4SMarek Szyprowski { 1284549a17e4SMarek Szyprowski unsigned long order = get_order(size); 1285549a17e4SMarek Szyprowski struct page *page; 1286549a17e4SMarek Szyprowski 1287549a17e4SMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order); 1288549a17e4SMarek Szyprowski if (!page) 1289549a17e4SMarek Szyprowski goto error; 1290549a17e4SMarek Szyprowski 1291549a17e4SMarek Szyprowski __dma_clear_buffer(page, size); 1292549a17e4SMarek Szyprowski 1293549a17e4SMarek Szyprowski for (i = 0; i < count; i++) 1294549a17e4SMarek Szyprowski pages[i] = page + i; 1295549a17e4SMarek Szyprowski 1296549a17e4SMarek Szyprowski return pages; 1297549a17e4SMarek Szyprowski } 1298549a17e4SMarek Szyprowski 129914d3ae2eSDoug Anderson /* Go straight to 4K chunks if caller says it's OK. 
*/ 130014d3ae2eSDoug Anderson if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) 130114d3ae2eSDoug Anderson order_idx = ARRAY_SIZE(iommu_order_array) - 1; 130214d3ae2eSDoug Anderson 1303f8669befSMarek Szyprowski /* 1304f8669befSMarek Szyprowski * IOMMU can map any pages, so highmem can also be used here 1305f8669befSMarek Szyprowski */ 1306f8669befSMarek Szyprowski gfp |= __GFP_NOWARN | __GFP_HIGHMEM; 1307f8669befSMarek Szyprowski 13084ce63fcdSMarek Szyprowski while (count) { 130949f28aa6STomasz Figa int j, order; 13104ce63fcdSMarek Szyprowski 131133298ef6SDoug Anderson order = iommu_order_array[order_idx]; 131233298ef6SDoug Anderson 131333298ef6SDoug Anderson /* Drop down when we get small */ 131433298ef6SDoug Anderson if (__fls(count) < order) { 131533298ef6SDoug Anderson order_idx++; 131633298ef6SDoug Anderson continue; 131749f28aa6STomasz Figa } 131849f28aa6STomasz Figa 131933298ef6SDoug Anderson if (order) { 132033298ef6SDoug Anderson /* See if it's easy to allocate a high-order chunk */ 132133298ef6SDoug Anderson pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); 132233298ef6SDoug Anderson 132333298ef6SDoug Anderson /* Go down a notch at first sign of pressure */ 132449f28aa6STomasz Figa if (!pages[i]) { 132533298ef6SDoug Anderson order_idx++; 132633298ef6SDoug Anderson continue; 132733298ef6SDoug Anderson } 132833298ef6SDoug Anderson } else { 132949f28aa6STomasz Figa pages[i] = alloc_pages(gfp, 0); 13304ce63fcdSMarek Szyprowski if (!pages[i]) 13314ce63fcdSMarek Szyprowski goto error; 133249f28aa6STomasz Figa } 13334ce63fcdSMarek Szyprowski 13345a796eebSHiroshi Doyu if (order) { 13354ce63fcdSMarek Szyprowski split_page(pages[i], order); 13364ce63fcdSMarek Szyprowski j = 1 << order; 13374ce63fcdSMarek Szyprowski while (--j) 13384ce63fcdSMarek Szyprowski pages[i + j] = pages[i] + j; 13395a796eebSHiroshi Doyu } 13404ce63fcdSMarek Szyprowski 13414ce63fcdSMarek Szyprowski __dma_clear_buffer(pages[i], PAGE_SIZE << order); 13424ce63fcdSMarek Szyprowski i += 1 << order; 13434ce63fcdSMarek Szyprowski count -= 1 << order; 13444ce63fcdSMarek Szyprowski } 13454ce63fcdSMarek Szyprowski 13464ce63fcdSMarek Szyprowski return pages; 13474ce63fcdSMarek Szyprowski error: 13489fa8af91SMarek Szyprowski while (i--) 13494ce63fcdSMarek Szyprowski if (pages[i]) 13504ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 13511d5cfdb0STetsuo Handa kvfree(pages); 13524ce63fcdSMarek Szyprowski return NULL; 13534ce63fcdSMarek Szyprowski } 13544ce63fcdSMarek Szyprowski 1355549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages, 1356549a17e4SMarek Szyprowski size_t size, struct dma_attrs *attrs) 13574ce63fcdSMarek Szyprowski { 13584ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT; 13594ce63fcdSMarek Szyprowski int i; 1360549a17e4SMarek Szyprowski 1361549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { 1362549a17e4SMarek Szyprowski dma_release_from_contiguous(dev, pages[0], count); 1363549a17e4SMarek Szyprowski } else { 13644ce63fcdSMarek Szyprowski for (i = 0; i < count; i++) 13654ce63fcdSMarek Szyprowski if (pages[i]) 13664ce63fcdSMarek Szyprowski __free_pages(pages[i], 0); 1367549a17e4SMarek Szyprowski } 1368549a17e4SMarek Szyprowski 13691d5cfdb0STetsuo Handa kvfree(pages); 13704ce63fcdSMarek Szyprowski return 0; 13714ce63fcdSMarek Szyprowski } 13724ce63fcdSMarek Szyprowski 13734ce63fcdSMarek Szyprowski /* 13744ce63fcdSMarek Szyprowski * Create a CPU mapping for the specified pages 13754ce63fcdSMarek Szyprowski */ 13764ce63fcdSMarek
Szyprowski static void * 1377e9da6e99SMarek Szyprowski __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, 1378e9da6e99SMarek Szyprowski const void *caller) 13794ce63fcdSMarek Szyprowski { 1380513510ddSLaura Abbott return dma_common_pages_remap(pages, size, 1381513510ddSLaura Abbott VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller); 13824ce63fcdSMarek Szyprowski } 13834ce63fcdSMarek Szyprowski 13844ce63fcdSMarek Szyprowski /* 13854ce63fcdSMarek Szyprowski * Create a mapping in device IO address space for specified pages 13864ce63fcdSMarek Szyprowski */ 13874ce63fcdSMarek Szyprowski static dma_addr_t 13884ce63fcdSMarek Szyprowski __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) 13894ce63fcdSMarek Szyprowski { 139089cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13914ce63fcdSMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 13924ce63fcdSMarek Szyprowski dma_addr_t dma_addr, iova; 139390cde558SAndre Przywara int i; 13944ce63fcdSMarek Szyprowski 13954ce63fcdSMarek Szyprowski dma_addr = __alloc_iova(mapping, size); 13964ce63fcdSMarek Szyprowski if (dma_addr == DMA_ERROR_CODE) 13974ce63fcdSMarek Szyprowski return dma_addr; 13984ce63fcdSMarek Szyprowski 13994ce63fcdSMarek Szyprowski iova = dma_addr; 14004ce63fcdSMarek Szyprowski for (i = 0; i < count; ) { 140190cde558SAndre Przywara int ret; 140290cde558SAndre Przywara 14034ce63fcdSMarek Szyprowski unsigned int next_pfn = page_to_pfn(pages[i]) + 1; 14044ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(pages[i]); 14054ce63fcdSMarek Szyprowski unsigned int len, j; 14064ce63fcdSMarek Szyprowski 14074ce63fcdSMarek Szyprowski for (j = i + 1; j < count; j++, next_pfn++) 14084ce63fcdSMarek Szyprowski if (page_to_pfn(pages[j]) != next_pfn) 14094ce63fcdSMarek Szyprowski break; 14104ce63fcdSMarek Szyprowski 14114ce63fcdSMarek Szyprowski len = (j - i) << PAGE_SHIFT; 1412c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, 1413c9b24996SAndreas Herrmann IOMMU_READ|IOMMU_WRITE); 14144ce63fcdSMarek Szyprowski if (ret < 0) 14154ce63fcdSMarek Szyprowski goto fail; 14164ce63fcdSMarek Szyprowski iova += len; 14174ce63fcdSMarek Szyprowski i = j; 14184ce63fcdSMarek Szyprowski } 14194ce63fcdSMarek Szyprowski return dma_addr; 14204ce63fcdSMarek Szyprowski fail: 14214ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); 14224ce63fcdSMarek Szyprowski __free_iova(mapping, dma_addr, size); 14234ce63fcdSMarek Szyprowski return DMA_ERROR_CODE; 14244ce63fcdSMarek Szyprowski } 14254ce63fcdSMarek Szyprowski 14264ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 14274ce63fcdSMarek Szyprowski { 142889cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 14294ce63fcdSMarek Szyprowski 14304ce63fcdSMarek Szyprowski /* 14314ce63fcdSMarek Szyprowski * add optional in-page offset from iova to size and align 14324ce63fcdSMarek Szyprowski * result to page size 14334ce63fcdSMarek Szyprowski */ 14344ce63fcdSMarek Szyprowski size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 14354ce63fcdSMarek Szyprowski iova &= PAGE_MASK; 14364ce63fcdSMarek Szyprowski 14374ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, size); 14384ce63fcdSMarek Szyprowski __free_iova(mapping, iova, size); 14394ce63fcdSMarek Szyprowski return 0; 14404ce63fcdSMarek Szyprowski } 14414ce63fcdSMarek Szyprowski 1442665bad7bSHiroshi Doyu static struct page 
**__atomic_get_pages(void *addr) 1443665bad7bSHiroshi Doyu { 144436d0fd21SLaura Abbott struct page *page; 144536d0fd21SLaura Abbott phys_addr_t phys; 1446665bad7bSHiroshi Doyu 144736d0fd21SLaura Abbott phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr); 144836d0fd21SLaura Abbott page = phys_to_page(phys); 144936d0fd21SLaura Abbott 145036d0fd21SLaura Abbott return (struct page **)page; 1451665bad7bSHiroshi Doyu } 1452665bad7bSHiroshi Doyu 1453955c757eSMarek Szyprowski static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) 1454e9da6e99SMarek Szyprowski { 1455e9da6e99SMarek Szyprowski struct vm_struct *area; 1456e9da6e99SMarek Szyprowski 1457665bad7bSHiroshi Doyu if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1458665bad7bSHiroshi Doyu return __atomic_get_pages(cpu_addr); 1459665bad7bSHiroshi Doyu 1460955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1461955c757eSMarek Szyprowski return cpu_addr; 1462955c757eSMarek Szyprowski 1463e9da6e99SMarek Szyprowski area = find_vm_area(cpu_addr); 1464e9da6e99SMarek Szyprowski if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) 1465e9da6e99SMarek Szyprowski return area->pages; 1466e9da6e99SMarek Szyprowski return NULL; 1467e9da6e99SMarek Szyprowski } 1468e9da6e99SMarek Szyprowski 1469479ed93aSHiroshi Doyu static void *__iommu_alloc_atomic(struct device *dev, size_t size, 1470479ed93aSHiroshi Doyu dma_addr_t *handle) 1471479ed93aSHiroshi Doyu { 1472479ed93aSHiroshi Doyu struct page *page; 1473479ed93aSHiroshi Doyu void *addr; 1474479ed93aSHiroshi Doyu 1475479ed93aSHiroshi Doyu addr = __alloc_from_pool(size, &page); 1476479ed93aSHiroshi Doyu if (!addr) 1477479ed93aSHiroshi Doyu return NULL; 1478479ed93aSHiroshi Doyu 1479479ed93aSHiroshi Doyu *handle = __iommu_create_mapping(dev, &page, size); 1480479ed93aSHiroshi Doyu if (*handle == DMA_ERROR_CODE) 1481479ed93aSHiroshi Doyu goto err_mapping; 1482479ed93aSHiroshi Doyu 1483479ed93aSHiroshi Doyu return addr; 1484479ed93aSHiroshi Doyu 1485479ed93aSHiroshi Doyu err_mapping: 1486479ed93aSHiroshi Doyu __free_from_pool(addr, size); 1487479ed93aSHiroshi Doyu return NULL; 1488479ed93aSHiroshi Doyu } 1489479ed93aSHiroshi Doyu 1490d5898291SMarek Szyprowski static void __iommu_free_atomic(struct device *dev, void *cpu_addr, 1491479ed93aSHiroshi Doyu dma_addr_t handle, size_t size) 1492479ed93aSHiroshi Doyu { 1493479ed93aSHiroshi Doyu __iommu_remove_mapping(dev, handle, size); 1494d5898291SMarek Szyprowski __free_from_pool(cpu_addr, size); 1495479ed93aSHiroshi Doyu } 1496479ed93aSHiroshi Doyu 14974ce63fcdSMarek Szyprowski static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 14984ce63fcdSMarek Szyprowski dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 14994ce63fcdSMarek Szyprowski { 150071b55663SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 15014ce63fcdSMarek Szyprowski struct page **pages; 15024ce63fcdSMarek Szyprowski void *addr = NULL; 15034ce63fcdSMarek Szyprowski 15044ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 15054ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 15064ce63fcdSMarek Szyprowski 1507d0164adcSMel Gorman if (!gfpflags_allow_blocking(gfp)) 1508479ed93aSHiroshi Doyu return __iommu_alloc_atomic(dev, size, handle); 1509479ed93aSHiroshi Doyu 15105b91a98cSRichard Zhao /* 15115b91a98cSRichard Zhao * Following is a work-around (a.k.a. hack) to prevent pages 15125b91a98cSRichard Zhao * with __GFP_COMP being passed to split_page() which cannot 15135b91a98cSRichard Zhao * handle them. 
The real problem is that this flag probably 15145b91a98cSRichard Zhao * should be 0 on ARM as it is not supported on this 15155b91a98cSRichard Zhao * platform; see CONFIG_HUGETLBFS. 15165b91a98cSRichard Zhao */ 15175b91a98cSRichard Zhao gfp &= ~(__GFP_COMP); 15185b91a98cSRichard Zhao 1519549a17e4SMarek Szyprowski pages = __iommu_alloc_buffer(dev, size, gfp, attrs); 15204ce63fcdSMarek Szyprowski if (!pages) 15214ce63fcdSMarek Szyprowski return NULL; 15224ce63fcdSMarek Szyprowski 15234ce63fcdSMarek Szyprowski *handle = __iommu_create_mapping(dev, pages, size); 15244ce63fcdSMarek Szyprowski if (*handle == DMA_ERROR_CODE) 15254ce63fcdSMarek Szyprowski goto err_buffer; 15264ce63fcdSMarek Szyprowski 1527955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1528955c757eSMarek Szyprowski return pages; 1529955c757eSMarek Szyprowski 1530e9da6e99SMarek Szyprowski addr = __iommu_alloc_remap(pages, size, gfp, prot, 1531e9da6e99SMarek Szyprowski __builtin_return_address(0)); 15324ce63fcdSMarek Szyprowski if (!addr) 15334ce63fcdSMarek Szyprowski goto err_mapping; 15344ce63fcdSMarek Szyprowski 15354ce63fcdSMarek Szyprowski return addr; 15364ce63fcdSMarek Szyprowski 15374ce63fcdSMarek Szyprowski err_mapping: 15384ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, *handle, size); 15394ce63fcdSMarek Szyprowski err_buffer: 1540549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 15414ce63fcdSMarek Szyprowski return NULL; 15424ce63fcdSMarek Szyprowski } 15434ce63fcdSMarek Szyprowski 15444ce63fcdSMarek Szyprowski static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 15454ce63fcdSMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 15464ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 15474ce63fcdSMarek Szyprowski { 15484ce63fcdSMarek Szyprowski unsigned long uaddr = vma->vm_start; 15494ce63fcdSMarek Szyprowski unsigned long usize = vma->vm_end - vma->vm_start; 1550955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1551371f0f08SMarek Szyprowski unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 1552371f0f08SMarek Szyprowski unsigned long off = vma->vm_pgoff; 1553e9da6e99SMarek Szyprowski 1554e9da6e99SMarek Szyprowski vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 1555e9da6e99SMarek Szyprowski 1556e9da6e99SMarek Szyprowski if (!pages) 1557e9da6e99SMarek Szyprowski return -ENXIO; 15584ce63fcdSMarek Szyprowski 1559371f0f08SMarek Szyprowski if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off) 1560371f0f08SMarek Szyprowski return -ENXIO; 1561371f0f08SMarek Szyprowski 15627e312103SMarek Szyprowski pages += off; 15637e312103SMarek Szyprowski 15644ce63fcdSMarek Szyprowski do { 1565e9da6e99SMarek Szyprowski int ret = vm_insert_page(vma, uaddr, *pages++); 15664ce63fcdSMarek Szyprowski if (ret) { 1567e9da6e99SMarek Szyprowski pr_err("Remapping memory failed: %d\n", ret); 15684ce63fcdSMarek Szyprowski return ret; 15694ce63fcdSMarek Szyprowski } 15704ce63fcdSMarek Szyprowski uaddr += PAGE_SIZE; 15714ce63fcdSMarek Szyprowski usize -= PAGE_SIZE; 15724ce63fcdSMarek Szyprowski } while (usize > 0); 1573e9da6e99SMarek Szyprowski 15744ce63fcdSMarek Szyprowski return 0; 15754ce63fcdSMarek Szyprowski } 15764ce63fcdSMarek Szyprowski 15774ce63fcdSMarek Szyprowski /* 15784ce63fcdSMarek Szyprowski * free a page as defined by the above mapping. 15794ce63fcdSMarek Szyprowski * Must not be called with IRQs disabled. 
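 *
 * This is the free half of arm_iommu_alloc_attrs(); once iommu_ops is
 * installed it is typically reached through the generic API, e.g.
 * (sketch only): dma_free_coherent(dev, size, cpu_addr, handle);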
15804ce63fcdSMarek Szyprowski */ 15814ce63fcdSMarek Szyprowski void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 15824ce63fcdSMarek Szyprowski dma_addr_t handle, struct dma_attrs *attrs) 15834ce63fcdSMarek Szyprowski { 1584836bfa0dSYoungJun Cho struct page **pages; 15854ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 15864ce63fcdSMarek Szyprowski 1587479ed93aSHiroshi Doyu if (__in_atomic_pool(cpu_addr, size)) { 1588d5898291SMarek Szyprowski __iommu_free_atomic(dev, cpu_addr, handle, size); 1589479ed93aSHiroshi Doyu return; 1590479ed93aSHiroshi Doyu } 1591479ed93aSHiroshi Doyu 1592836bfa0dSYoungJun Cho pages = __iommu_get_pages(cpu_addr, attrs); 1593836bfa0dSYoungJun Cho if (!pages) { 1594836bfa0dSYoungJun Cho WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1595836bfa0dSYoungJun Cho return; 1596836bfa0dSYoungJun Cho } 1597836bfa0dSYoungJun Cho 1598955c757eSMarek Szyprowski if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { 1599513510ddSLaura Abbott dma_common_free_remap(cpu_addr, size, 1600513510ddSLaura Abbott VM_ARM_DMA_CONSISTENT | VM_USERMAP); 1601955c757eSMarek Szyprowski } 1602e9da6e99SMarek Szyprowski 16034ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size); 1604549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 16054ce63fcdSMarek Szyprowski } 16064ce63fcdSMarek Szyprowski 1607dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 1608dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, 1609dc2832e1SMarek Szyprowski size_t size, struct dma_attrs *attrs) 1610dc2832e1SMarek Szyprowski { 1611dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1612dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 1613dc2832e1SMarek Szyprowski 1614dc2832e1SMarek Szyprowski if (!pages) 1615dc2832e1SMarek Szyprowski return -ENXIO; 1616dc2832e1SMarek Szyprowski 1617dc2832e1SMarek Szyprowski return sg_alloc_table_from_pages(sgt, pages, count, 0, size, 1618dc2832e1SMarek Szyprowski GFP_KERNEL); 16194ce63fcdSMarek Szyprowski } 16204ce63fcdSMarek Szyprowski 1621c9b24996SAndreas Herrmann static int __dma_direction_to_prot(enum dma_data_direction dir) 1622c9b24996SAndreas Herrmann { 1623c9b24996SAndreas Herrmann int prot; 1624c9b24996SAndreas Herrmann 1625c9b24996SAndreas Herrmann switch (dir) { 1626c9b24996SAndreas Herrmann case DMA_BIDIRECTIONAL: 1627c9b24996SAndreas Herrmann prot = IOMMU_READ | IOMMU_WRITE; 1628c9b24996SAndreas Herrmann break; 1629c9b24996SAndreas Herrmann case DMA_TO_DEVICE: 1630c9b24996SAndreas Herrmann prot = IOMMU_READ; 1631c9b24996SAndreas Herrmann break; 1632c9b24996SAndreas Herrmann case DMA_FROM_DEVICE: 1633c9b24996SAndreas Herrmann prot = IOMMU_WRITE; 1634c9b24996SAndreas Herrmann break; 1635c9b24996SAndreas Herrmann default: 1636c9b24996SAndreas Herrmann prot = 0; 1637c9b24996SAndreas Herrmann } 1638c9b24996SAndreas Herrmann 1639c9b24996SAndreas Herrmann return prot; 1640c9b24996SAndreas Herrmann } 1641c9b24996SAndreas Herrmann 16424ce63fcdSMarek Szyprowski /* 16434ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 16444ce63fcdSMarek Szyprowski */ 16454ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 16464ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 16470fa478dfSRob Herring enum dma_data_direction dir, struct dma_attrs *attrs, 16480fa478dfSRob Herring bool is_coherent) 
16494ce63fcdSMarek Szyprowski { 165089cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 16514ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 16524ce63fcdSMarek Szyprowski int ret = 0; 16534ce63fcdSMarek Szyprowski unsigned int count; 16544ce63fcdSMarek Szyprowski struct scatterlist *s; 1655c9b24996SAndreas Herrmann int prot; 16564ce63fcdSMarek Szyprowski 16574ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 16584ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 16594ce63fcdSMarek Szyprowski 16604ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 16614ce63fcdSMarek Szyprowski if (iova == DMA_ERROR_CODE) 16624ce63fcdSMarek Szyprowski return -ENOMEM; 16634ce63fcdSMarek Szyprowski 16644ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 16653e6110fdSDan Williams phys_addr_t phys = page_to_phys(sg_page(s)); 16664ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 16674ce63fcdSMarek Szyprowski 16680fa478dfSRob Herring if (!is_coherent && 166997ef952aSMarek Szyprowski !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 16704ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 16714ce63fcdSMarek Szyprowski 1672c9b24996SAndreas Herrmann prot = __dma_direction_to_prot(dir); 1673c9b24996SAndreas Herrmann 1674c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, prot); 16754ce63fcdSMarek Szyprowski if (ret < 0) 16764ce63fcdSMarek Szyprowski goto fail; 16774ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 16784ce63fcdSMarek Szyprowski iova += len; 16794ce63fcdSMarek Szyprowski } 16804ce63fcdSMarek Szyprowski *handle = iova_base; 16814ce63fcdSMarek Szyprowski 16824ce63fcdSMarek Szyprowski return 0; 16834ce63fcdSMarek Szyprowski fail: 16844ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 16854ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 16864ce63fcdSMarek Szyprowski return ret; 16874ce63fcdSMarek Szyprowski } 16884ce63fcdSMarek Szyprowski 16890fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 16900fa478dfSRob Herring enum dma_data_direction dir, struct dma_attrs *attrs, 16910fa478dfSRob Herring bool is_coherent) 16924ce63fcdSMarek Szyprowski { 16934ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 16944ce63fcdSMarek Szyprowski int i, count = 0; 16954ce63fcdSMarek Szyprowski unsigned int offset = s->offset; 16964ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 16974ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 16984ce63fcdSMarek Szyprowski 16994ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 17004ce63fcdSMarek Szyprowski s = sg_next(s); 17014ce63fcdSMarek Szyprowski 17024ce63fcdSMarek Szyprowski s->dma_address = DMA_ERROR_CODE; 17034ce63fcdSMarek Szyprowski s->dma_length = 0; 17044ce63fcdSMarek Szyprowski 17054ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 17064ce63fcdSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, 17070fa478dfSRob Herring dir, attrs, is_coherent) < 0) 17084ce63fcdSMarek Szyprowski goto bad_mapping; 17094ce63fcdSMarek Szyprowski 17104ce63fcdSMarek Szyprowski dma->dma_address += offset; 17114ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 17124ce63fcdSMarek Szyprowski 17134ce63fcdSMarek Szyprowski size = offset = s->offset; 17144ce63fcdSMarek 
Szyprowski start = s; 17154ce63fcdSMarek Szyprowski dma = sg_next(dma); 17164ce63fcdSMarek Szyprowski count += 1; 17174ce63fcdSMarek Szyprowski } 17184ce63fcdSMarek Szyprowski size += s->length; 17194ce63fcdSMarek Szyprowski } 17200fa478dfSRob Herring if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 17210fa478dfSRob Herring is_coherent) < 0) 17224ce63fcdSMarek Szyprowski goto bad_mapping; 17234ce63fcdSMarek Szyprowski 17244ce63fcdSMarek Szyprowski dma->dma_address += offset; 17254ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 17264ce63fcdSMarek Szyprowski 17274ce63fcdSMarek Szyprowski return count+1; 17284ce63fcdSMarek Szyprowski 17294ce63fcdSMarek Szyprowski bad_mapping: 17304ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 17314ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 17324ce63fcdSMarek Szyprowski return 0; 17334ce63fcdSMarek Szyprowski } 17344ce63fcdSMarek Szyprowski 17354ce63fcdSMarek Szyprowski /** 17360fa478dfSRob Herring * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA 17370fa478dfSRob Herring * @dev: valid struct device pointer 17380fa478dfSRob Herring * @sg: list of buffers 17390fa478dfSRob Herring * @nents: number of buffers to map 17400fa478dfSRob Herring * @dir: DMA transfer direction 17410fa478dfSRob Herring * 17420fa478dfSRob Herring * Map a set of i/o coherent buffers described by scatterlist in streaming 17430fa478dfSRob Herring * mode for DMA. The scatter gather list elements are merged together (if 17440fa478dfSRob Herring * possible) and tagged with the appropriate dma address and length. They are 17450fa478dfSRob Herring * obtained via sg_dma_{address,length}. 17460fa478dfSRob Herring */ 17470fa478dfSRob Herring int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 17480fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 17490fa478dfSRob Herring { 17500fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 17510fa478dfSRob Herring } 17520fa478dfSRob Herring 17530fa478dfSRob Herring /** 17540fa478dfSRob Herring * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 17550fa478dfSRob Herring * @dev: valid struct device pointer 17560fa478dfSRob Herring * @sg: list of buffers 17570fa478dfSRob Herring * @nents: number of buffers to map 17580fa478dfSRob Herring * @dir: DMA transfer direction 17590fa478dfSRob Herring * 17600fa478dfSRob Herring * Map a set of buffers described by scatterlist in streaming mode for DMA. 17610fa478dfSRob Herring * The scatter gather list elements are merged together (if possible) and 17620fa478dfSRob Herring * tagged with the appropriate dma address and length. They are obtained via 17630fa478dfSRob Herring * sg_dma_{address,length}. 
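 *
 * Caller-side sketch (illustrative; program_hw_desc() is a made-up
 * driver helper, not part of this file):
 *
 *	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	for_each_sg(sg, s, count, i)
 *		program_hw_desc(sg_dma_address(s), sg_dma_len(s));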
17640fa478dfSRob Herring */ 17650fa478dfSRob Herring int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 17660fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 17670fa478dfSRob Herring { 17680fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 17690fa478dfSRob Herring } 17700fa478dfSRob Herring 17710fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 17720fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs, 17730fa478dfSRob Herring bool is_coherent) 17740fa478dfSRob Herring { 17750fa478dfSRob Herring struct scatterlist *s; 17760fa478dfSRob Herring int i; 17770fa478dfSRob Herring 17780fa478dfSRob Herring for_each_sg(sg, s, nents, i) { 17790fa478dfSRob Herring if (sg_dma_len(s)) 17800fa478dfSRob Herring __iommu_remove_mapping(dev, sg_dma_address(s), 17810fa478dfSRob Herring sg_dma_len(s)); 17820fa478dfSRob Herring if (!is_coherent && 17830fa478dfSRob Herring !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 17840fa478dfSRob Herring __dma_page_dev_to_cpu(sg_page(s), s->offset, 17850fa478dfSRob Herring s->length, dir); 17860fa478dfSRob Herring } 17870fa478dfSRob Herring } 17880fa478dfSRob Herring 17890fa478dfSRob Herring /** 17900fa478dfSRob Herring * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 17910fa478dfSRob Herring * @dev: valid struct device pointer 17920fa478dfSRob Herring * @sg: list of buffers 17930fa478dfSRob Herring * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 17940fa478dfSRob Herring * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17950fa478dfSRob Herring * 17960fa478dfSRob Herring * Unmap a set of streaming mode DMA translations. Again, CPU access 17970fa478dfSRob Herring * rules concerning calls here are the same as for dma_unmap_single(). 17980fa478dfSRob Herring */ 17990fa478dfSRob Herring void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 18000fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 18010fa478dfSRob Herring { 18020fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 18030fa478dfSRob Herring } 18040fa478dfSRob Herring 18050fa478dfSRob Herring /** 18064ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 18074ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18084ce63fcdSMarek Szyprowski * @sg: list of buffers 18094ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 18104ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 18114ce63fcdSMarek Szyprowski * 18124ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 18134ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 
18144ce63fcdSMarek Szyprowski */ 18154ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 18164ce63fcdSMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 18174ce63fcdSMarek Szyprowski { 18180fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 18194ce63fcdSMarek Szyprowski } 18204ce63fcdSMarek Szyprowski 18214ce63fcdSMarek Szyprowski /** 18224ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 18234ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18244ce63fcdSMarek Szyprowski * @sg: list of buffers 18254ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 18264ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 18274ce63fcdSMarek Szyprowski */ 18284ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 18294ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 18304ce63fcdSMarek Szyprowski { 18314ce63fcdSMarek Szyprowski struct scatterlist *s; 18324ce63fcdSMarek Szyprowski int i; 18334ce63fcdSMarek Szyprowski 18344ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 18354ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 18364ce63fcdSMarek Szyprowski 18374ce63fcdSMarek Szyprowski } 18384ce63fcdSMarek Szyprowski 18394ce63fcdSMarek Szyprowski /** 18404ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 18414ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18424ce63fcdSMarek Szyprowski * @sg: list of buffers 18434ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 18444ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 18454ce63fcdSMarek Szyprowski */ 18464ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 18474ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 18484ce63fcdSMarek Szyprowski { 18494ce63fcdSMarek Szyprowski struct scatterlist *s; 18504ce63fcdSMarek Szyprowski int i; 18514ce63fcdSMarek Szyprowski 18524ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 18534ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 18544ce63fcdSMarek Szyprowski } 18554ce63fcdSMarek Szyprowski 18564ce63fcdSMarek Szyprowski 18574ce63fcdSMarek Szyprowski /** 18580fa478dfSRob Herring * arm_coherent_iommu_map_page 18590fa478dfSRob Herring * @dev: valid struct device pointer 18600fa478dfSRob Herring * @page: page that buffer resides in 18610fa478dfSRob Herring * @offset: offset into page for start of buffer 18620fa478dfSRob Herring * @size: size of buffer to map 18630fa478dfSRob Herring * @dir: DMA transfer direction 18640fa478dfSRob Herring * 18650fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_map_page() 18660fa478dfSRob Herring */ 18670fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 18680fa478dfSRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 18690fa478dfSRob Herring struct dma_attrs *attrs) 18700fa478dfSRob Herring { 187189cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18720fa478dfSRob Herring dma_addr_t dma_addr; 187313987d68SWill Deacon int ret, prot, len = PAGE_ALIGN(size + offset); 18740fa478dfSRob Herring 18750fa478dfSRob Herring dma_addr = __alloc_iova(mapping, len); 18760fa478dfSRob Herring if (dma_addr == 
DMA_ERROR_CODE) 18770fa478dfSRob Herring return dma_addr; 18780fa478dfSRob Herring 1879c9b24996SAndreas Herrmann prot = __dma_direction_to_prot(dir); 188013987d68SWill Deacon 188113987d68SWill Deacon ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 18820fa478dfSRob Herring if (ret < 0) 18830fa478dfSRob Herring goto fail; 18840fa478dfSRob Herring 18850fa478dfSRob Herring return dma_addr + offset; 18860fa478dfSRob Herring fail: 18870fa478dfSRob Herring __free_iova(mapping, dma_addr, len); 18880fa478dfSRob Herring return DMA_ERROR_CODE; 18890fa478dfSRob Herring } 18900fa478dfSRob Herring 18910fa478dfSRob Herring /** 18924ce63fcdSMarek Szyprowski * arm_iommu_map_page 18934ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 18944ce63fcdSMarek Szyprowski * @page: page that buffer resides in 18954ce63fcdSMarek Szyprowski * @offset: offset into page for start of buffer 18964ce63fcdSMarek Szyprowski * @size: size of buffer to map 18974ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 18984ce63fcdSMarek Szyprowski * 18994ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_map_page() 19004ce63fcdSMarek Szyprowski */ 19014ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 19024ce63fcdSMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 19034ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 19044ce63fcdSMarek Szyprowski { 19050fa478dfSRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 19064ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 19074ce63fcdSMarek Szyprowski 19080fa478dfSRob Herring return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); 19090fa478dfSRob Herring } 19104ce63fcdSMarek Szyprowski 19110fa478dfSRob Herring /** 19120fa478dfSRob Herring * arm_coherent_iommu_unmap_page 19130fa478dfSRob Herring * @dev: valid struct device pointer 19140fa478dfSRob Herring * @handle: DMA address of buffer 19150fa478dfSRob Herring * @size: size of buffer (same as passed to dma_map_page) 19160fa478dfSRob Herring * @dir: DMA transfer direction (same as passed to dma_map_page) 19170fa478dfSRob Herring * 19180fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_unmap_page() 19190fa478dfSRob Herring */ 19200fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 19210fa478dfSRob Herring size_t size, enum dma_data_direction dir, 19220fa478dfSRob Herring struct dma_attrs *attrs) 19230fa478dfSRob Herring { 192489cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19250fa478dfSRob Herring dma_addr_t iova = handle & PAGE_MASK; 19260fa478dfSRob Herring int offset = handle & ~PAGE_MASK; 19270fa478dfSRob Herring int len = PAGE_ALIGN(size + offset); 19284ce63fcdSMarek Szyprowski 19290fa478dfSRob Herring if (!iova) 19300fa478dfSRob Herring return; 19310fa478dfSRob Herring 19320fa478dfSRob Herring iommu_unmap(mapping->domain, iova, len); 19330fa478dfSRob Herring __free_iova(mapping, iova, len); 19344ce63fcdSMarek Szyprowski } 19354ce63fcdSMarek Szyprowski 19364ce63fcdSMarek Szyprowski /** 19374ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 19384ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 19394ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 19404ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 19414ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 19424ce63fcdSMarek 
Szyprowski * 19434ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 19444ce63fcdSMarek Szyprowski */ 19454ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 19464ce63fcdSMarek Szyprowski size_t size, enum dma_data_direction dir, 19474ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 19484ce63fcdSMarek Szyprowski { 194989cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19504ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19514ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19524ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 19534ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 19544ce63fcdSMarek Szyprowski 19554ce63fcdSMarek Szyprowski if (!iova) 19564ce63fcdSMarek Szyprowski return; 19574ce63fcdSMarek Szyprowski 19580fa478dfSRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 19594ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 19604ce63fcdSMarek Szyprowski 19614ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 19624ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 19634ce63fcdSMarek Szyprowski } 19644ce63fcdSMarek Szyprowski 19654ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 19664ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 19674ce63fcdSMarek Szyprowski { 196889cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19694ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19704ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19714ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 19724ce63fcdSMarek Szyprowski 19734ce63fcdSMarek Szyprowski if (!iova) 19744ce63fcdSMarek Szyprowski return; 19754ce63fcdSMarek Szyprowski 19764ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 19774ce63fcdSMarek Szyprowski } 19784ce63fcdSMarek Szyprowski 19794ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 19804ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 19814ce63fcdSMarek Szyprowski { 198289cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19834ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 19844ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19854ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 19864ce63fcdSMarek Szyprowski 19874ce63fcdSMarek Szyprowski if (!iova) 19884ce63fcdSMarek Szyprowski return; 19894ce63fcdSMarek Szyprowski 19904ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 19914ce63fcdSMarek Szyprowski } 19924ce63fcdSMarek Szyprowski 19934ce63fcdSMarek Szyprowski struct dma_map_ops iommu_ops = { 19944ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs, 19954ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs, 19964ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs, 1997dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable, 19984ce63fcdSMarek Szyprowski 19994ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page, 20004ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page, 20014ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 20024ce63fcdSMarek Szyprowski 
.sync_single_for_device = arm_iommu_sync_single_for_device, 20034ce63fcdSMarek Szyprowski 20044ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg, 20054ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg, 20064ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 20074ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device, 2008d09e1333SHiroshi Doyu 2009d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask, 20104ce63fcdSMarek Szyprowski }; 20114ce63fcdSMarek Szyprowski 20120fa478dfSRob Herring struct dma_map_ops iommu_coherent_ops = { 20130fa478dfSRob Herring .alloc = arm_iommu_alloc_attrs, 20140fa478dfSRob Herring .free = arm_iommu_free_attrs, 20150fa478dfSRob Herring .mmap = arm_iommu_mmap_attrs, 20160fa478dfSRob Herring .get_sgtable = arm_iommu_get_sgtable, 20170fa478dfSRob Herring 20180fa478dfSRob Herring .map_page = arm_coherent_iommu_map_page, 20190fa478dfSRob Herring .unmap_page = arm_coherent_iommu_unmap_page, 20200fa478dfSRob Herring 20210fa478dfSRob Herring .map_sg = arm_coherent_iommu_map_sg, 20220fa478dfSRob Herring .unmap_sg = arm_coherent_iommu_unmap_sg, 2023d09e1333SHiroshi Doyu 2024d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask, 20250fa478dfSRob Herring }; 20260fa478dfSRob Herring 20274ce63fcdSMarek Szyprowski /** 20284ce63fcdSMarek Szyprowski * arm_iommu_create_mapping 20294ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls) 20304ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space 203168efd7d2SMarek Szyprowski * @size: maximum size of the valid IO address space 20324ce63fcdSMarek Szyprowski * 20334ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused 20344ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and 20354ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions. 20364ce63fcdSMarek Szyprowski * 20374ce63fcdSMarek Szyprowski * The client device needs to be attached to the mapping with 20384ce63fcdSMarek Szyprowski * the arm_iommu_attach_device() function.
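 *
 * A minimal usage sketch (the base and size values are illustrative only):
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	err = arm_iommu_attach_device(dev, mapping);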
20394ce63fcdSMarek Szyprowski */ 20404ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 20411424532bSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) 20424ce63fcdSMarek Szyprowski { 204368efd7d2SMarek Szyprowski unsigned int bits = size >> PAGE_SHIFT; 204468efd7d2SMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 20454ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 204668efd7d2SMarek Szyprowski int extensions = 1; 20474ce63fcdSMarek Szyprowski int err = -ENOMEM; 20484ce63fcdSMarek Szyprowski 20491424532bSMarek Szyprowski /* currently only 32-bit DMA address space is supported */ 20501424532bSMarek Szyprowski if (size > DMA_BIT_MASK(32) + 1) 20511424532bSMarek Szyprowski return ERR_PTR(-ERANGE); 20521424532bSMarek Szyprowski 205368efd7d2SMarek Szyprowski if (!bitmap_size) 20544ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 20554ce63fcdSMarek Szyprowski 205668efd7d2SMarek Szyprowski if (bitmap_size > PAGE_SIZE) { 205768efd7d2SMarek Szyprowski extensions = bitmap_size / PAGE_SIZE; 205868efd7d2SMarek Szyprowski bitmap_size = PAGE_SIZE; 205968efd7d2SMarek Szyprowski } 206068efd7d2SMarek Szyprowski 20614ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 20624ce63fcdSMarek Szyprowski if (!mapping) 20634ce63fcdSMarek Szyprowski goto err; 20644ce63fcdSMarek Szyprowski 206568efd7d2SMarek Szyprowski mapping->bitmap_size = bitmap_size; 206668efd7d2SMarek Szyprowski mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *), 20674d852ef8SAndreas Herrmann GFP_KERNEL); 20684d852ef8SAndreas Herrmann if (!mapping->bitmaps) 20694ce63fcdSMarek Szyprowski goto err2; 20704ce63fcdSMarek Szyprowski 207168efd7d2SMarek Szyprowski mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); 20724d852ef8SAndreas Herrmann if (!mapping->bitmaps[0]) 20734d852ef8SAndreas Herrmann goto err3; 20744d852ef8SAndreas Herrmann 20754d852ef8SAndreas Herrmann mapping->nr_bitmaps = 1; 20764d852ef8SAndreas Herrmann mapping->extensions = extensions; 20774ce63fcdSMarek Szyprowski mapping->base = base; 207868efd7d2SMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 20794d852ef8SAndreas Herrmann 20804ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 20814ce63fcdSMarek Szyprowski 20824ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 20834ce63fcdSMarek Szyprowski if (!mapping->domain) 20844d852ef8SAndreas Herrmann goto err4; 20854ce63fcdSMarek Szyprowski 20864ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 20874ce63fcdSMarek Szyprowski return mapping; 20884d852ef8SAndreas Herrmann err4: 20894d852ef8SAndreas Herrmann kfree(mapping->bitmaps[0]); 20904ce63fcdSMarek Szyprowski err3: 20914d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 20924ce63fcdSMarek Szyprowski err2: 20934ce63fcdSMarek Szyprowski kfree(mapping); 20944ce63fcdSMarek Szyprowski err: 20954ce63fcdSMarek Szyprowski return ERR_PTR(err); 20964ce63fcdSMarek Szyprowski } 209718177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 20984ce63fcdSMarek Szyprowski 20994ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 21004ce63fcdSMarek Szyprowski { 21014d852ef8SAndreas Herrmann int i; 21024ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 21034ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 21044ce63fcdSMarek Szyprowski 21054ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 21064d852ef8SAndreas Herrmann for (i = 0; i < 
mapping->nr_bitmaps; i++) 21074d852ef8SAndreas Herrmann kfree(mapping->bitmaps[i]); 21084d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 21094ce63fcdSMarek Szyprowski kfree(mapping); 21104ce63fcdSMarek Szyprowski } 21114ce63fcdSMarek Szyprowski 21124d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping) 21134d852ef8SAndreas Herrmann { 21144d852ef8SAndreas Herrmann int next_bitmap; 21154d852ef8SAndreas Herrmann 2116462859aaSMarek Szyprowski if (mapping->nr_bitmaps >= mapping->extensions) 21174d852ef8SAndreas Herrmann return -EINVAL; 21184d852ef8SAndreas Herrmann 21194d852ef8SAndreas Herrmann next_bitmap = mapping->nr_bitmaps; 21204d852ef8SAndreas Herrmann mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, 21214d852ef8SAndreas Herrmann GFP_ATOMIC); 21224d852ef8SAndreas Herrmann if (!mapping->bitmaps[next_bitmap]) 21234d852ef8SAndreas Herrmann return -ENOMEM; 21244d852ef8SAndreas Herrmann 21254d852ef8SAndreas Herrmann mapping->nr_bitmaps++; 21264d852ef8SAndreas Herrmann 21274d852ef8SAndreas Herrmann return 0; 21284d852ef8SAndreas Herrmann } 21294d852ef8SAndreas Herrmann 21304ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 21314ce63fcdSMarek Szyprowski { 21324ce63fcdSMarek Szyprowski if (mapping) 21334ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping); 21344ce63fcdSMarek Szyprowski } 213518177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 21364ce63fcdSMarek Szyprowski 2137eab8d653SLaurent Pinchart static int __arm_iommu_attach_device(struct device *dev, 21384ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping) 21394ce63fcdSMarek Szyprowski { 21404ce63fcdSMarek Szyprowski int err; 21414ce63fcdSMarek Szyprowski 21424ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev); 21434ce63fcdSMarek Szyprowski if (err) 21444ce63fcdSMarek Szyprowski return err; 21454ce63fcdSMarek Szyprowski 21464ce63fcdSMarek Szyprowski kref_get(&mapping->kref); 214789cfdb19SWill Deacon to_dma_iommu_mapping(dev) = mapping; 21484ce63fcdSMarek Szyprowski 214975c59716SHiroshi Doyu pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 21504ce63fcdSMarek Szyprowski return 0; 21514ce63fcdSMarek Szyprowski } 21524ce63fcdSMarek Szyprowski 21536fe36758SHiroshi Doyu /** 2154eab8d653SLaurent Pinchart * arm_iommu_attach_device 21556fe36758SHiroshi Doyu * @dev: valid struct device pointer 2156eab8d653SLaurent Pinchart * @mapping: io address space mapping structure (returned from 2157eab8d653SLaurent Pinchart * arm_iommu_create_mapping) 21586fe36758SHiroshi Doyu * 2159eab8d653SLaurent Pinchart * Attaches specified io address space mapping to the provided device. 2160eab8d653SLaurent Pinchart * This replaces the dma operations (dma_map_ops pointer) with the 2161eab8d653SLaurent Pinchart * IOMMU aware version. 2162eab8d653SLaurent Pinchart * 2163eab8d653SLaurent Pinchart * More than one client might be attached to the same io address space 2164eab8d653SLaurent Pinchart * mapping. 
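 *
 * Each successful attach takes a reference on @mapping; undo it with
 * arm_iommu_detach_device(), and drop the creation reference with
 * arm_iommu_release_mapping() when the mapping is no longer needed.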
21656fe36758SHiroshi Doyu */ 2166eab8d653SLaurent Pinchart int arm_iommu_attach_device(struct device *dev, 2167eab8d653SLaurent Pinchart struct dma_iommu_mapping *mapping) 2168eab8d653SLaurent Pinchart { 2169eab8d653SLaurent Pinchart int err; 2170eab8d653SLaurent Pinchart 2171eab8d653SLaurent Pinchart err = __arm_iommu_attach_device(dev, mapping); 2172eab8d653SLaurent Pinchart if (err) 2173eab8d653SLaurent Pinchart return err; 2174eab8d653SLaurent Pinchart 2175eab8d653SLaurent Pinchart set_dma_ops(dev, &iommu_ops); 2176eab8d653SLaurent Pinchart return 0; 2177eab8d653SLaurent Pinchart } 2178eab8d653SLaurent Pinchart EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 2179eab8d653SLaurent Pinchart 2180eab8d653SLaurent Pinchart static void __arm_iommu_detach_device(struct device *dev) 21816fe36758SHiroshi Doyu { 21826fe36758SHiroshi Doyu struct dma_iommu_mapping *mapping; 21836fe36758SHiroshi Doyu 21846fe36758SHiroshi Doyu mapping = to_dma_iommu_mapping(dev); 21856fe36758SHiroshi Doyu if (!mapping) { 21866fe36758SHiroshi Doyu dev_warn(dev, "Not attached\n"); 21876fe36758SHiroshi Doyu return; 21886fe36758SHiroshi Doyu } 21896fe36758SHiroshi Doyu 21906fe36758SHiroshi Doyu iommu_detach_device(mapping->domain, dev); 21916fe36758SHiroshi Doyu kref_put(&mapping->kref, release_iommu_mapping); 219289cfdb19SWill Deacon to_dma_iommu_mapping(dev) = NULL; 21936fe36758SHiroshi Doyu 21946fe36758SHiroshi Doyu pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 21956fe36758SHiroshi Doyu } 2196eab8d653SLaurent Pinchart 2197eab8d653SLaurent Pinchart /** 2198eab8d653SLaurent Pinchart * arm_iommu_detach_device 2199eab8d653SLaurent Pinchart * @dev: valid struct device pointer 2200eab8d653SLaurent Pinchart * 2201eab8d653SLaurent Pinchart * Detaches the provided device from a previously attached map. 2202eab8d653SLaurent Pinchart * This voids the dma operations (dma_map_ops pointer) 2203eab8d653SLaurent Pinchart */ 2204eab8d653SLaurent Pinchart void arm_iommu_detach_device(struct device *dev) 2205eab8d653SLaurent Pinchart { 2206eab8d653SLaurent Pinchart __arm_iommu_detach_device(dev); 2207eab8d653SLaurent Pinchart set_dma_ops(dev, NULL); 2208eab8d653SLaurent Pinchart } 220918177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 22106fe36758SHiroshi Doyu 22114bb25789SWill Deacon static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 22124bb25789SWill Deacon { 22134bb25789SWill Deacon return coherent ? 
&iommu_coherent_ops : &iommu_ops; 22144bb25789SWill Deacon } 22154bb25789SWill Deacon 22164bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, 22174bb25789SWill Deacon struct iommu_ops *iommu) 22184bb25789SWill Deacon { 22194bb25789SWill Deacon struct dma_iommu_mapping *mapping; 22204bb25789SWill Deacon 22214bb25789SWill Deacon if (!iommu) 22224bb25789SWill Deacon return false; 22234bb25789SWill Deacon 22244bb25789SWill Deacon mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); 22254bb25789SWill Deacon if (IS_ERR(mapping)) { 22264bb25789SWill Deacon pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", 22274bb25789SWill Deacon size, dev_name(dev)); 22284bb25789SWill Deacon return false; 22294bb25789SWill Deacon } 22304bb25789SWill Deacon 2231eab8d653SLaurent Pinchart if (__arm_iommu_attach_device(dev, mapping)) { 22324bb25789SWill Deacon pr_warn("Failed to attach device %s to IOMMU mapping\n", 22334bb25789SWill Deacon dev_name(dev)); 22344bb25789SWill Deacon arm_iommu_release_mapping(mapping); 22354bb25789SWill Deacon return false; 22364bb25789SWill Deacon } 22374bb25789SWill Deacon 22384bb25789SWill Deacon return true; 22394bb25789SWill Deacon } 22404bb25789SWill Deacon 22414bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) 22424bb25789SWill Deacon { 224389cfdb19SWill Deacon struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 22444bb25789SWill Deacon 2245c2273a18SWill Deacon if (!mapping) 2246c2273a18SWill Deacon return; 2247c2273a18SWill Deacon 2248eab8d653SLaurent Pinchart __arm_iommu_detach_device(dev); 22494bb25789SWill Deacon arm_iommu_release_mapping(mapping); 22504bb25789SWill Deacon } 22514bb25789SWill Deacon 22524bb25789SWill Deacon #else 22534bb25789SWill Deacon 22544bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, 22554bb25789SWill Deacon struct iommu_ops *iommu) 22564bb25789SWill Deacon { 22574bb25789SWill Deacon return false; 22584bb25789SWill Deacon } 22594bb25789SWill Deacon 22604bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { } 22614bb25789SWill Deacon 22624bb25789SWill Deacon #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops 22634bb25789SWill Deacon 22644bb25789SWill Deacon #endif /* CONFIG_ARM_DMA_USE_IOMMU */ 22654bb25789SWill Deacon 22664bb25789SWill Deacon static struct dma_map_ops *arm_get_dma_map_ops(bool coherent) 22674bb25789SWill Deacon { 22684bb25789SWill Deacon return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; 22694bb25789SWill Deacon } 22704bb25789SWill Deacon 22714bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 22724bb25789SWill Deacon struct iommu_ops *iommu, bool coherent) 22734bb25789SWill Deacon { 22744bb25789SWill Deacon struct dma_map_ops *dma_ops; 22754bb25789SWill Deacon 22766f51ee70SLinus Torvalds dev->archdata.dma_coherent = coherent; 22774bb25789SWill Deacon if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) 22784bb25789SWill Deacon dma_ops = arm_get_iommu_dma_map_ops(coherent); 22794bb25789SWill Deacon else 22804bb25789SWill Deacon dma_ops = arm_get_dma_map_ops(coherent); 22814bb25789SWill Deacon 22824bb25789SWill Deacon set_dma_ops(dev, dma_ops); 22834bb25789SWill Deacon } 22844bb25789SWill Deacon 22854bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev) 22864bb25789SWill Deacon { 22874bb25789SWill Deacon arm_teardown_iommu_dma_ops(dev); 22884bb25789SWill Deacon } 2289
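/*
 * Usage overview (a hedged sketch, not code compiled here): per-device
 * setup normally arrives via arch_setup_dma_ops(), called by bus code
 * while binding a device, e.g.
 *
 *	arch_setup_dma_ops(dev, 0, SZ_1G, iommu, is_coherent);
 *
 * where the size and "is_coherent" flag are illustrative. This installs
 * iommu_ops/iommu_coherent_ops when an IOMMU mapping can be created and
 * attached, and the plain arm_dma_ops otherwise.
 */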