/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
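
/*
 * Example (illustrative sketch only, not taken from this file): the
 * ownership rules above translate into the usual streaming-DMA driver
 * pattern below.  "mydev", "buf" and "len" are placeholders; error
 * handling is reduced to the mapping check.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(mydev, handle))
 *		return -ENOMEM;
 *	// device owns the buffer: caches were cleaned on map, and the
 *	// CPU must not touch buf until...
 *	dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
 *	// ...the unmap hands ownership back to the CPU.
 */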

static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
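
/*
 * Sketch of how these tables are reached (assuming the generic
 * dma-mapping-common dispatch of this kernel generation): a
 * dma_map_page() call on an ARM device effectively becomes
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t handle = ops->map_page(dev, page, offset, size,
 *					  dir, NULL);
 *
 * arm_coherent_dma_ops omits the unmap/sync hooks because a coherent
 * device never needs the cache maintenance those hooks exist to provide.
 */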

static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}
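
/*
 * Worked example (assuming a 1:1 bus offset, i.e. dma_to_pfn(dev, a) is
 * simply a >> PAGE_SHIFT): a device with coherent_dma_mask = 0x00ffffff
 * can reach pfns 0..0xfff, i.e. 16MiB with 4K pages.  If max_dma_pfn is
 * larger than that - more DMA-able memory exists than the device can
 * address - __dma_supported() above returns 0 and the mask is refused.
 */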

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
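
/*
 * Worked example for the trimming above: a 3-page (12KiB) request is
 * rounded up by get_order() to order 2 (4 pages).  split_page() turns
 * the compound allocation into 4 independent order-0 pages, and the
 * loop frees the 4th, so exactly size >> PAGE_SHIFT pages remain.
 */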

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}
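
/*
 * Example: the pool size can be set on the kernel command line, e.g.
 *
 *	coherent_pool=4M
 *
 * memparse() accepts the usual K/M/G suffixes.  Platform code may
 * instead call init_dma_coherent_pool_size() early in boot, but only
 * before atomic_pool_init() has run (hence the BUG_ON above).
 */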

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;

	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				(void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
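
/*
 * Sketch of the path this pool serves: a driver allocating with
 * GFP_ATOMIC cannot sleep in the remapping or CMA code, so
 *
 *	void *va = dma_alloc_coherent(dev, size, &handle, GFP_ATOMIC);
 *
 * ends up in __alloc_from_pool() (GFP_ATOMIC lacks __GFP_WAIT, see
 * __dma_alloc() below), which simply carves "size" bytes out of this
 * pre-mapped, pre-flushed gen_pool.
 */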

/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) cannot cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, PAGE_KERNEL);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}
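
/*
 * Example (sketch, using the dma-attrs API of this kernel generation):
 * a driver requesting a write-combining mapping, which makes
 * __get_dma_pgprot() pick pgprot_writecombine() instead of the default
 * pgprot_dmacoherent():
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *
 *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 *	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
 */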

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!dev_get_cma_area(dev))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}
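
/*
 * To summarise __dma_alloc()'s backend choice above:
 *
 *	coherent device or !MMU   ->  __alloc_simple_buffer()
 *	!(gfp & __GFP_WAIT)       ->  __alloc_from_pool()      (atomic)
 *	no CMA area for the dev   ->  __alloc_remap_buffer()
 *	otherwise                 ->  __alloc_from_contiguous()
 */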

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
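
/*
 * Example (sketch): a driver exporting such a buffer to user space from
 * its ->mmap() file operation via the generic wrapper; "priv" is a
 * placeholder for driver-private state holding the allocation:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */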

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
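
/*
 * Example (sketch): dma-buf style exporters reach the helper above
 * through the generic dma_get_sgtable() wrapper; error handling and the
 * hand-off to the importing device are elided:
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
 *	if (ret == 0)
 *		// hand &sgt to the importer, sg_free_table() when done
 */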

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
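
/*
 * Example (sketch): typical driver use of the scatter-gather interface.
 * Note that the count returned by dma_map_sg(), not the original nents,
 * bounds the walk; program_hw() is a placeholder for writing the
 * device's descriptors:
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */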

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
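
/*
 * Example (sketch): a driver for a device that can only master 24 bits
 * declares its reach at probe time; both calls end up consulting
 * dma_supported()/arm_dma_set_mask() above:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(24)) ||
 *	    dma_set_coherent_mask(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 */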

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_ERROR_CODE;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
10904d852ef8SAndreas Herrmann */
10914d852ef8SAndreas Herrmann BUG();
10924d852ef8SAndreas Herrmann } else
109368efd7d2SMarek Szyprowski count = size >> PAGE_SHIFT;
10944ce63fcdSMarek Szyprowski
10954ce63fcdSMarek Szyprowski spin_lock_irqsave(&mapping->lock, flags);
10964d852ef8SAndreas Herrmann bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
10974ce63fcdSMarek Szyprowski spin_unlock_irqrestore(&mapping->lock, flags);
10984ce63fcdSMarek Szyprowski }
10994ce63fcdSMarek Szyprowski
1100549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1101549a17e4SMarek Szyprowski gfp_t gfp, struct dma_attrs *attrs)
11024ce63fcdSMarek Szyprowski {
11034ce63fcdSMarek Szyprowski struct page **pages;
11044ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT;
11054ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *);
11064ce63fcdSMarek Szyprowski int i = 0;
11074ce63fcdSMarek Szyprowski
11084ce63fcdSMarek Szyprowski if (array_size <= PAGE_SIZE)
11094ce63fcdSMarek Szyprowski pages = kzalloc(array_size, gfp);
11104ce63fcdSMarek Szyprowski else
11114ce63fcdSMarek Szyprowski pages = vzalloc(array_size);
11124ce63fcdSMarek Szyprowski if (!pages)
11134ce63fcdSMarek Szyprowski return NULL;
11144ce63fcdSMarek Szyprowski
1115549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
1116549a17e4SMarek Szyprowski {
1117549a17e4SMarek Szyprowski unsigned long order = get_order(size);
1118549a17e4SMarek Szyprowski struct page *page;
1119549a17e4SMarek Szyprowski
1120549a17e4SMarek Szyprowski page = dma_alloc_from_contiguous(dev, count, order);
1121549a17e4SMarek Szyprowski if (!page)
1122549a17e4SMarek Szyprowski goto error;
1123549a17e4SMarek Szyprowski
1124549a17e4SMarek Szyprowski __dma_clear_buffer(page, size);
1125549a17e4SMarek Szyprowski
1126549a17e4SMarek Szyprowski for (i = 0; i < count; i++)
1127549a17e4SMarek Szyprowski pages[i] = page + i;
1128549a17e4SMarek Szyprowski
1129549a17e4SMarek Szyprowski return pages;
1130549a17e4SMarek Szyprowski }
1131549a17e4SMarek Szyprowski
1132f8669befSMarek Szyprowski /*
1133f8669befSMarek Szyprowski * IOMMU can map any pages, so highmem can also be used here
1134f8669befSMarek Szyprowski */
1135f8669befSMarek Szyprowski gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
1136f8669befSMarek Szyprowski
11374ce63fcdSMarek Szyprowski while (count) {
1138593f4735SMarek Szyprowski int j, order = __fls(count);
11394ce63fcdSMarek Szyprowski
1140f8669befSMarek Szyprowski pages[i] = alloc_pages(gfp, order);
11414ce63fcdSMarek Szyprowski while (!pages[i] && order)
1142f8669befSMarek Szyprowski pages[i] = alloc_pages(gfp, --order);
11434ce63fcdSMarek Szyprowski if (!pages[i])
11444ce63fcdSMarek Szyprowski goto error;
11454ce63fcdSMarek Szyprowski
11465a796eebSHiroshi Doyu if (order) {
11474ce63fcdSMarek Szyprowski split_page(pages[i], order);
11484ce63fcdSMarek Szyprowski j = 1 << order;
11494ce63fcdSMarek Szyprowski while (--j)
11504ce63fcdSMarek Szyprowski pages[i + j] = pages[i] + j;
11515a796eebSHiroshi Doyu }
11524ce63fcdSMarek Szyprowski
11534ce63fcdSMarek Szyprowski __dma_clear_buffer(pages[i], PAGE_SIZE << order);
11544ce63fcdSMarek Szyprowski i += 1 << order;
11554ce63fcdSMarek Szyprowski count -= 1 << order;
11564ce63fcdSMarek Szyprowski }
11574ce63fcdSMarek Szyprowski
11584ce63fcdSMarek Szyprowski return pages;
11594ce63fcdSMarek Szyprowski error:
11609fa8af91SMarek Szyprowski while (i--)
11614ce63fcdSMarek Szyprowski if (pages[i])
11624ce63fcdSMarek Szyprowski __free_pages(pages[i], 0);
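/* Free the page-pointer array itself, matching the kzalloc()/vzalloc() choice made at allocation time. */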
116346c87852SPrathyush K if (array_size <= PAGE_SIZE)
11644ce63fcdSMarek Szyprowski kfree(pages);
11654ce63fcdSMarek Szyprowski else
11664ce63fcdSMarek Szyprowski vfree(pages);
11674ce63fcdSMarek Szyprowski return NULL;
11684ce63fcdSMarek Szyprowski }
11694ce63fcdSMarek Szyprowski
1170549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages,
1171549a17e4SMarek Szyprowski size_t size, struct dma_attrs *attrs)
11724ce63fcdSMarek Szyprowski {
11734ce63fcdSMarek Szyprowski int count = size >> PAGE_SHIFT;
11744ce63fcdSMarek Szyprowski int array_size = count * sizeof(struct page *);
11754ce63fcdSMarek Szyprowski int i;
1176549a17e4SMarek Szyprowski
1177549a17e4SMarek Szyprowski if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
1178549a17e4SMarek Szyprowski dma_release_from_contiguous(dev, pages[0], count);
1179549a17e4SMarek Szyprowski } else {
11804ce63fcdSMarek Szyprowski for (i = 0; i < count; i++)
11814ce63fcdSMarek Szyprowski if (pages[i])
11824ce63fcdSMarek Szyprowski __free_pages(pages[i], 0);
1183549a17e4SMarek Szyprowski }
1184549a17e4SMarek Szyprowski
118546c87852SPrathyush K if (array_size <= PAGE_SIZE)
11864ce63fcdSMarek Szyprowski kfree(pages);
11874ce63fcdSMarek Szyprowski else
11884ce63fcdSMarek Szyprowski vfree(pages);
11894ce63fcdSMarek Szyprowski return 0;
11904ce63fcdSMarek Szyprowski }
11914ce63fcdSMarek Szyprowski
11924ce63fcdSMarek Szyprowski /*
11934ce63fcdSMarek Szyprowski * Create a CPU mapping for the specified pages
11944ce63fcdSMarek Szyprowski */
11954ce63fcdSMarek Szyprowski static void *
1196e9da6e99SMarek Szyprowski __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1197e9da6e99SMarek Szyprowski const void *caller)
11984ce63fcdSMarek Szyprowski {
1199513510ddSLaura Abbott return dma_common_pages_remap(pages, size,
1200513510ddSLaura Abbott VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
12014ce63fcdSMarek Szyprowski }
12024ce63fcdSMarek Szyprowski
12034ce63fcdSMarek Szyprowski /*
12044ce63fcdSMarek Szyprowski * Create a mapping in device IO address space for the specified pages
12054ce63fcdSMarek Szyprowski */
12064ce63fcdSMarek Szyprowski static dma_addr_t
12074ce63fcdSMarek Szyprowski __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
12084ce63fcdSMarek Szyprowski {
12094ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping;
12104ce63fcdSMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
12114ce63fcdSMarek Szyprowski dma_addr_t dma_addr, iova;
12124ce63fcdSMarek Szyprowski int i, ret = DMA_ERROR_CODE;
12134ce63fcdSMarek Szyprowski
12144ce63fcdSMarek Szyprowski dma_addr = __alloc_iova(mapping, size);
12154ce63fcdSMarek Szyprowski if (dma_addr == DMA_ERROR_CODE)
12164ce63fcdSMarek Szyprowski return dma_addr;
12174ce63fcdSMarek Szyprowski
12184ce63fcdSMarek Szyprowski iova = dma_addr;
12194ce63fcdSMarek Szyprowski for (i = 0; i < count; ) {
12204ce63fcdSMarek Szyprowski unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
12214ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(pages[i]);
12224ce63fcdSMarek Szyprowski unsigned int len, j;
12234ce63fcdSMarek Szyprowski
12244ce63fcdSMarek Szyprowski for (j = i + 1; j < count; j++, next_pfn++)
12254ce63fcdSMarek Szyprowski if (page_to_pfn(pages[j]) != next_pfn)
12264ce63fcdSMarek Szyprowski break;
12274ce63fcdSMarek Szyprowski
12284ce63fcdSMarek Szyprowski len = (j - i) << PAGE_SHIFT;
1229c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len,
1230c9b24996SAndreas Herrmann IOMMU_READ|IOMMU_WRITE); 12314ce63fcdSMarek Szyprowski if (ret < 0) 12324ce63fcdSMarek Szyprowski goto fail; 12334ce63fcdSMarek Szyprowski iova += len; 12344ce63fcdSMarek Szyprowski i = j; 12354ce63fcdSMarek Szyprowski } 12364ce63fcdSMarek Szyprowski return dma_addr; 12374ce63fcdSMarek Szyprowski fail: 12384ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); 12394ce63fcdSMarek Szyprowski __free_iova(mapping, dma_addr, size); 12404ce63fcdSMarek Szyprowski return DMA_ERROR_CODE; 12414ce63fcdSMarek Szyprowski } 12424ce63fcdSMarek Szyprowski 12434ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 12444ce63fcdSMarek Szyprowski { 12454ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 12464ce63fcdSMarek Szyprowski 12474ce63fcdSMarek Szyprowski /* 12484ce63fcdSMarek Szyprowski * add optional in-page offset from iova to size and align 12494ce63fcdSMarek Szyprowski * result to page size 12504ce63fcdSMarek Szyprowski */ 12514ce63fcdSMarek Szyprowski size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 12524ce63fcdSMarek Szyprowski iova &= PAGE_MASK; 12534ce63fcdSMarek Szyprowski 12544ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, size); 12554ce63fcdSMarek Szyprowski __free_iova(mapping, iova, size); 12564ce63fcdSMarek Szyprowski return 0; 12574ce63fcdSMarek Szyprowski } 12584ce63fcdSMarek Szyprowski 1259665bad7bSHiroshi Doyu static struct page **__atomic_get_pages(void *addr) 1260665bad7bSHiroshi Doyu { 126136d0fd21SLaura Abbott struct page *page; 126236d0fd21SLaura Abbott phys_addr_t phys; 1263665bad7bSHiroshi Doyu 126436d0fd21SLaura Abbott phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr); 126536d0fd21SLaura Abbott page = phys_to_page(phys); 126636d0fd21SLaura Abbott 126736d0fd21SLaura Abbott return (struct page **)page; 1268665bad7bSHiroshi Doyu } 1269665bad7bSHiroshi Doyu 1270955c757eSMarek Szyprowski static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) 1271e9da6e99SMarek Szyprowski { 1272e9da6e99SMarek Szyprowski struct vm_struct *area; 1273e9da6e99SMarek Szyprowski 1274665bad7bSHiroshi Doyu if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1275665bad7bSHiroshi Doyu return __atomic_get_pages(cpu_addr); 1276665bad7bSHiroshi Doyu 1277955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1278955c757eSMarek Szyprowski return cpu_addr; 1279955c757eSMarek Szyprowski 1280e9da6e99SMarek Szyprowski area = find_vm_area(cpu_addr); 1281e9da6e99SMarek Szyprowski if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) 1282e9da6e99SMarek Szyprowski return area->pages; 1283e9da6e99SMarek Szyprowski return NULL; 1284e9da6e99SMarek Szyprowski } 1285e9da6e99SMarek Szyprowski 1286479ed93aSHiroshi Doyu static void *__iommu_alloc_atomic(struct device *dev, size_t size, 1287479ed93aSHiroshi Doyu dma_addr_t *handle) 1288479ed93aSHiroshi Doyu { 1289479ed93aSHiroshi Doyu struct page *page; 1290479ed93aSHiroshi Doyu void *addr; 1291479ed93aSHiroshi Doyu 1292479ed93aSHiroshi Doyu addr = __alloc_from_pool(size, &page); 1293479ed93aSHiroshi Doyu if (!addr) 1294479ed93aSHiroshi Doyu return NULL; 1295479ed93aSHiroshi Doyu 1296479ed93aSHiroshi Doyu *handle = __iommu_create_mapping(dev, &page, size); 1297479ed93aSHiroshi Doyu if (*handle == DMA_ERROR_CODE) 1298479ed93aSHiroshi Doyu goto err_mapping; 1299479ed93aSHiroshi Doyu 1300479ed93aSHiroshi Doyu return addr; 1301479ed93aSHiroshi Doyu 1302479ed93aSHiroshi Doyu 
err_mapping: 1303479ed93aSHiroshi Doyu __free_from_pool(addr, size); 1304479ed93aSHiroshi Doyu return NULL; 1305479ed93aSHiroshi Doyu } 1306479ed93aSHiroshi Doyu 1307d5898291SMarek Szyprowski static void __iommu_free_atomic(struct device *dev, void *cpu_addr, 1308479ed93aSHiroshi Doyu dma_addr_t handle, size_t size) 1309479ed93aSHiroshi Doyu { 1310479ed93aSHiroshi Doyu __iommu_remove_mapping(dev, handle, size); 1311d5898291SMarek Szyprowski __free_from_pool(cpu_addr, size); 1312479ed93aSHiroshi Doyu } 1313479ed93aSHiroshi Doyu 13144ce63fcdSMarek Szyprowski static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 13154ce63fcdSMarek Szyprowski dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 13164ce63fcdSMarek Szyprowski { 131771b55663SRussell King pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 13184ce63fcdSMarek Szyprowski struct page **pages; 13194ce63fcdSMarek Szyprowski void *addr = NULL; 13204ce63fcdSMarek Szyprowski 13214ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 13224ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 13234ce63fcdSMarek Szyprowski 132410c8562fSMarek Szyprowski if (!(gfp & __GFP_WAIT)) 1325479ed93aSHiroshi Doyu return __iommu_alloc_atomic(dev, size, handle); 1326479ed93aSHiroshi Doyu 13275b91a98cSRichard Zhao /* 13285b91a98cSRichard Zhao * Following is a work-around (a.k.a. hack) to prevent pages 13295b91a98cSRichard Zhao * with __GFP_COMP being passed to split_page() which cannot 13305b91a98cSRichard Zhao * handle them. The real problem is that this flag probably 13315b91a98cSRichard Zhao * should be 0 on ARM as it is not supported on this 13325b91a98cSRichard Zhao * platform; see CONFIG_HUGETLBFS. 13335b91a98cSRichard Zhao */ 13345b91a98cSRichard Zhao gfp &= ~(__GFP_COMP); 13355b91a98cSRichard Zhao 1336549a17e4SMarek Szyprowski pages = __iommu_alloc_buffer(dev, size, gfp, attrs); 13374ce63fcdSMarek Szyprowski if (!pages) 13384ce63fcdSMarek Szyprowski return NULL; 13394ce63fcdSMarek Szyprowski 13404ce63fcdSMarek Szyprowski *handle = __iommu_create_mapping(dev, pages, size); 13414ce63fcdSMarek Szyprowski if (*handle == DMA_ERROR_CODE) 13424ce63fcdSMarek Szyprowski goto err_buffer; 13434ce63fcdSMarek Szyprowski 1344955c757eSMarek Szyprowski if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1345955c757eSMarek Szyprowski return pages; 1346955c757eSMarek Szyprowski 1347e9da6e99SMarek Szyprowski addr = __iommu_alloc_remap(pages, size, gfp, prot, 1348e9da6e99SMarek Szyprowski __builtin_return_address(0)); 13494ce63fcdSMarek Szyprowski if (!addr) 13504ce63fcdSMarek Szyprowski goto err_mapping; 13514ce63fcdSMarek Szyprowski 13524ce63fcdSMarek Szyprowski return addr; 13534ce63fcdSMarek Szyprowski 13544ce63fcdSMarek Szyprowski err_mapping: 13554ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, *handle, size); 13564ce63fcdSMarek Szyprowski err_buffer: 1357549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs); 13584ce63fcdSMarek Szyprowski return NULL; 13594ce63fcdSMarek Szyprowski } 13604ce63fcdSMarek Szyprowski 13614ce63fcdSMarek Szyprowski static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 13624ce63fcdSMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr, size_t size, 13634ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 13644ce63fcdSMarek Szyprowski { 13654ce63fcdSMarek Szyprowski unsigned long uaddr = vma->vm_start; 13664ce63fcdSMarek Szyprowski unsigned long usize = vma->vm_end - vma->vm_start; 1367955c757eSMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs); 
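/* Derive the user-visible page protection from the DMA attributes (e.g. DMA_ATTR_WRITE_COMBINE) before inserting the pages one by one. */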
1368e9da6e99SMarek Szyprowski
1369e9da6e99SMarek Szyprowski vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1370e9da6e99SMarek Szyprowski
1371e9da6e99SMarek Szyprowski if (!pages)
1372e9da6e99SMarek Szyprowski return -ENXIO;
13734ce63fcdSMarek Szyprowski
13744ce63fcdSMarek Szyprowski do {
1375e9da6e99SMarek Szyprowski int ret = vm_insert_page(vma, uaddr, *pages++);
13764ce63fcdSMarek Szyprowski if (ret) {
1377e9da6e99SMarek Szyprowski pr_err("Remapping memory failed: %d\n", ret);
13784ce63fcdSMarek Szyprowski return ret;
13794ce63fcdSMarek Szyprowski }
13804ce63fcdSMarek Szyprowski uaddr += PAGE_SIZE;
13814ce63fcdSMarek Szyprowski usize -= PAGE_SIZE;
13824ce63fcdSMarek Szyprowski } while (usize > 0);
1383e9da6e99SMarek Szyprowski
13844ce63fcdSMarek Szyprowski return 0;
13854ce63fcdSMarek Szyprowski }
13864ce63fcdSMarek Szyprowski
13874ce63fcdSMarek Szyprowski /*
13884ce63fcdSMarek Szyprowski * free a buffer as defined by the above mapping.
13894ce63fcdSMarek Szyprowski * Must not be called with IRQs disabled.
13904ce63fcdSMarek Szyprowski */
13914ce63fcdSMarek Szyprowski void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
13924ce63fcdSMarek Szyprowski dma_addr_t handle, struct dma_attrs *attrs)
13934ce63fcdSMarek Szyprowski {
1394836bfa0dSYoungJun Cho struct page **pages;
13954ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size);
13964ce63fcdSMarek Szyprowski
1397479ed93aSHiroshi Doyu if (__in_atomic_pool(cpu_addr, size)) {
1398d5898291SMarek Szyprowski __iommu_free_atomic(dev, cpu_addr, handle, size);
1399479ed93aSHiroshi Doyu return;
1400479ed93aSHiroshi Doyu }
1401479ed93aSHiroshi Doyu
1402836bfa0dSYoungJun Cho pages = __iommu_get_pages(cpu_addr, attrs);
1403836bfa0dSYoungJun Cho if (!pages) {
1404836bfa0dSYoungJun Cho WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1405836bfa0dSYoungJun Cho return;
1406836bfa0dSYoungJun Cho }
1407836bfa0dSYoungJun Cho
1408955c757eSMarek Szyprowski if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
1409513510ddSLaura Abbott dma_common_free_remap(cpu_addr, size,
1410513510ddSLaura Abbott VM_ARM_DMA_CONSISTENT | VM_USERMAP);
1411955c757eSMarek Szyprowski }
1412e9da6e99SMarek Szyprowski
14134ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, handle, size);
1414549a17e4SMarek Szyprowski __iommu_free_buffer(dev, pages, size, attrs);
14154ce63fcdSMarek Szyprowski }
14164ce63fcdSMarek Szyprowski
1417dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1418dc2832e1SMarek Szyprowski void *cpu_addr, dma_addr_t dma_addr,
1419dc2832e1SMarek Szyprowski size_t size, struct dma_attrs *attrs)
1420dc2832e1SMarek Szyprowski {
1421dc2832e1SMarek Szyprowski unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1422dc2832e1SMarek Szyprowski struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1423dc2832e1SMarek Szyprowski
1424dc2832e1SMarek Szyprowski if (!pages)
1425dc2832e1SMarek Szyprowski return -ENXIO;
1426dc2832e1SMarek Szyprowski
1427dc2832e1SMarek Szyprowski return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1428dc2832e1SMarek Szyprowski GFP_KERNEL);
14294ce63fcdSMarek Szyprowski }
14304ce63fcdSMarek Szyprowski
1431c9b24996SAndreas Herrmann static int __dma_direction_to_prot(enum dma_data_direction dir)
1432c9b24996SAndreas Herrmann {
1433c9b24996SAndreas Herrmann int prot;
1434c9b24996SAndreas Herrmann
1435c9b24996SAndreas Herrmann switch (dir) {
1436c9b24996SAndreas Herrmann case DMA_BIDIRECTIONAL:
1437c9b24996SAndreas Herrmann prot = IOMMU_READ |
IOMMU_WRITE; 1438c9b24996SAndreas Herrmann break; 1439c9b24996SAndreas Herrmann case DMA_TO_DEVICE: 1440c9b24996SAndreas Herrmann prot = IOMMU_READ; 1441c9b24996SAndreas Herrmann break; 1442c9b24996SAndreas Herrmann case DMA_FROM_DEVICE: 1443c9b24996SAndreas Herrmann prot = IOMMU_WRITE; 1444c9b24996SAndreas Herrmann break; 1445c9b24996SAndreas Herrmann default: 1446c9b24996SAndreas Herrmann prot = 0; 1447c9b24996SAndreas Herrmann } 1448c9b24996SAndreas Herrmann 1449c9b24996SAndreas Herrmann return prot; 1450c9b24996SAndreas Herrmann } 1451c9b24996SAndreas Herrmann 14524ce63fcdSMarek Szyprowski /* 14534ce63fcdSMarek Szyprowski * Map a part of the scatter-gather list into contiguous io address space 14544ce63fcdSMarek Szyprowski */ 14554ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 14564ce63fcdSMarek Szyprowski size_t size, dma_addr_t *handle, 14570fa478dfSRob Herring enum dma_data_direction dir, struct dma_attrs *attrs, 14580fa478dfSRob Herring bool is_coherent) 14594ce63fcdSMarek Szyprowski { 14604ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 14614ce63fcdSMarek Szyprowski dma_addr_t iova, iova_base; 14624ce63fcdSMarek Szyprowski int ret = 0; 14634ce63fcdSMarek Szyprowski unsigned int count; 14644ce63fcdSMarek Szyprowski struct scatterlist *s; 1465c9b24996SAndreas Herrmann int prot; 14664ce63fcdSMarek Szyprowski 14674ce63fcdSMarek Szyprowski size = PAGE_ALIGN(size); 14684ce63fcdSMarek Szyprowski *handle = DMA_ERROR_CODE; 14694ce63fcdSMarek Szyprowski 14704ce63fcdSMarek Szyprowski iova_base = iova = __alloc_iova(mapping, size); 14714ce63fcdSMarek Szyprowski if (iova == DMA_ERROR_CODE) 14724ce63fcdSMarek Szyprowski return -ENOMEM; 14734ce63fcdSMarek Szyprowski 14744ce63fcdSMarek Szyprowski for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 14754ce63fcdSMarek Szyprowski phys_addr_t phys = page_to_phys(sg_page(s)); 14764ce63fcdSMarek Szyprowski unsigned int len = PAGE_ALIGN(s->offset + s->length); 14774ce63fcdSMarek Szyprowski 14780fa478dfSRob Herring if (!is_coherent && 147997ef952aSMarek Szyprowski !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 14804ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 14814ce63fcdSMarek Szyprowski 1482c9b24996SAndreas Herrmann prot = __dma_direction_to_prot(dir); 1483c9b24996SAndreas Herrmann 1484c9b24996SAndreas Herrmann ret = iommu_map(mapping->domain, iova, phys, len, prot); 14854ce63fcdSMarek Szyprowski if (ret < 0) 14864ce63fcdSMarek Szyprowski goto fail; 14874ce63fcdSMarek Szyprowski count += len >> PAGE_SHIFT; 14884ce63fcdSMarek Szyprowski iova += len; 14894ce63fcdSMarek Szyprowski } 14904ce63fcdSMarek Szyprowski *handle = iova_base; 14914ce63fcdSMarek Szyprowski 14924ce63fcdSMarek Szyprowski return 0; 14934ce63fcdSMarek Szyprowski fail: 14944ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 14954ce63fcdSMarek Szyprowski __free_iova(mapping, iova_base, size); 14964ce63fcdSMarek Szyprowski return ret; 14974ce63fcdSMarek Szyprowski } 14984ce63fcdSMarek Szyprowski 14990fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 15000fa478dfSRob Herring enum dma_data_direction dir, struct dma_attrs *attrs, 15010fa478dfSRob Herring bool is_coherent) 15024ce63fcdSMarek Szyprowski { 15034ce63fcdSMarek Szyprowski struct scatterlist *s = sg, *dma = sg, *start = sg; 15044ce63fcdSMarek Szyprowski int i, count = 0; 15054ce63fcdSMarek Szyprowski 
unsigned int offset = s->offset; 15064ce63fcdSMarek Szyprowski unsigned int size = s->offset + s->length; 15074ce63fcdSMarek Szyprowski unsigned int max = dma_get_max_seg_size(dev); 15084ce63fcdSMarek Szyprowski 15094ce63fcdSMarek Szyprowski for (i = 1; i < nents; i++) { 15104ce63fcdSMarek Szyprowski s = sg_next(s); 15114ce63fcdSMarek Szyprowski 15124ce63fcdSMarek Szyprowski s->dma_address = DMA_ERROR_CODE; 15134ce63fcdSMarek Szyprowski s->dma_length = 0; 15144ce63fcdSMarek Szyprowski 15154ce63fcdSMarek Szyprowski if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 15164ce63fcdSMarek Szyprowski if (__map_sg_chunk(dev, start, size, &dma->dma_address, 15170fa478dfSRob Herring dir, attrs, is_coherent) < 0) 15184ce63fcdSMarek Szyprowski goto bad_mapping; 15194ce63fcdSMarek Szyprowski 15204ce63fcdSMarek Szyprowski dma->dma_address += offset; 15214ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 15224ce63fcdSMarek Szyprowski 15234ce63fcdSMarek Szyprowski size = offset = s->offset; 15244ce63fcdSMarek Szyprowski start = s; 15254ce63fcdSMarek Szyprowski dma = sg_next(dma); 15264ce63fcdSMarek Szyprowski count += 1; 15274ce63fcdSMarek Szyprowski } 15284ce63fcdSMarek Szyprowski size += s->length; 15294ce63fcdSMarek Szyprowski } 15300fa478dfSRob Herring if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 15310fa478dfSRob Herring is_coherent) < 0) 15324ce63fcdSMarek Szyprowski goto bad_mapping; 15334ce63fcdSMarek Szyprowski 15344ce63fcdSMarek Szyprowski dma->dma_address += offset; 15354ce63fcdSMarek Szyprowski dma->dma_length = size - offset; 15364ce63fcdSMarek Szyprowski 15374ce63fcdSMarek Szyprowski return count+1; 15384ce63fcdSMarek Szyprowski 15394ce63fcdSMarek Szyprowski bad_mapping: 15404ce63fcdSMarek Szyprowski for_each_sg(sg, s, count, i) 15414ce63fcdSMarek Szyprowski __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 15424ce63fcdSMarek Szyprowski return 0; 15434ce63fcdSMarek Szyprowski } 15444ce63fcdSMarek Szyprowski 15454ce63fcdSMarek Szyprowski /** 15460fa478dfSRob Herring * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA 15470fa478dfSRob Herring * @dev: valid struct device pointer 15480fa478dfSRob Herring * @sg: list of buffers 15490fa478dfSRob Herring * @nents: number of buffers to map 15500fa478dfSRob Herring * @dir: DMA transfer direction 15510fa478dfSRob Herring * 15520fa478dfSRob Herring * Map a set of i/o coherent buffers described by scatterlist in streaming 15530fa478dfSRob Herring * mode for DMA. The scatter gather list elements are merged together (if 15540fa478dfSRob Herring * possible) and tagged with the appropriate dma address and length. They are 15550fa478dfSRob Herring * obtained via sg_dma_{address,length}. 
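*
* Illustrative driver-side usage through the generic DMA API (the
* hw_program_segment() helper is hypothetical):
*
*	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
*	for_each_sg(sgl, s, count, i)
*		hw_program_segment(sg_dma_address(s), sg_dma_len(s));
*	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);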
15560fa478dfSRob Herring */ 15570fa478dfSRob Herring int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 15580fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 15590fa478dfSRob Herring { 15600fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 15610fa478dfSRob Herring } 15620fa478dfSRob Herring 15630fa478dfSRob Herring /** 15640fa478dfSRob Herring * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 15650fa478dfSRob Herring * @dev: valid struct device pointer 15660fa478dfSRob Herring * @sg: list of buffers 15670fa478dfSRob Herring * @nents: number of buffers to map 15680fa478dfSRob Herring * @dir: DMA transfer direction 15690fa478dfSRob Herring * 15700fa478dfSRob Herring * Map a set of buffers described by scatterlist in streaming mode for DMA. 15710fa478dfSRob Herring * The scatter gather list elements are merged together (if possible) and 15720fa478dfSRob Herring * tagged with the appropriate dma address and length. They are obtained via 15730fa478dfSRob Herring * sg_dma_{address,length}. 15740fa478dfSRob Herring */ 15750fa478dfSRob Herring int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 15760fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 15770fa478dfSRob Herring { 15780fa478dfSRob Herring return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 15790fa478dfSRob Herring } 15800fa478dfSRob Herring 15810fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 15820fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs, 15830fa478dfSRob Herring bool is_coherent) 15840fa478dfSRob Herring { 15850fa478dfSRob Herring struct scatterlist *s; 15860fa478dfSRob Herring int i; 15870fa478dfSRob Herring 15880fa478dfSRob Herring for_each_sg(sg, s, nents, i) { 15890fa478dfSRob Herring if (sg_dma_len(s)) 15900fa478dfSRob Herring __iommu_remove_mapping(dev, sg_dma_address(s), 15910fa478dfSRob Herring sg_dma_len(s)); 15920fa478dfSRob Herring if (!is_coherent && 15930fa478dfSRob Herring !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 15940fa478dfSRob Herring __dma_page_dev_to_cpu(sg_page(s), s->offset, 15950fa478dfSRob Herring s->length, dir); 15960fa478dfSRob Herring } 15970fa478dfSRob Herring } 15980fa478dfSRob Herring 15990fa478dfSRob Herring /** 16000fa478dfSRob Herring * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 16010fa478dfSRob Herring * @dev: valid struct device pointer 16020fa478dfSRob Herring * @sg: list of buffers 16030fa478dfSRob Herring * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 16040fa478dfSRob Herring * @dir: DMA transfer direction (same as was passed to dma_map_sg) 16050fa478dfSRob Herring * 16060fa478dfSRob Herring * Unmap a set of streaming mode DMA translations. Again, CPU access 16070fa478dfSRob Herring * rules concerning calls here are the same as for dma_unmap_single(). 
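* (The coherent variant performs no CPU cache maintenance; only the
* IOMMU mapping and the IO virtual address range are torn down.)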
16080fa478dfSRob Herring */ 16090fa478dfSRob Herring void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 16100fa478dfSRob Herring int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 16110fa478dfSRob Herring { 16120fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 16130fa478dfSRob Herring } 16140fa478dfSRob Herring 16150fa478dfSRob Herring /** 16164ce63fcdSMarek Szyprowski * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 16174ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 16184ce63fcdSMarek Szyprowski * @sg: list of buffers 16194ce63fcdSMarek Szyprowski * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 16204ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 16214ce63fcdSMarek Szyprowski * 16224ce63fcdSMarek Szyprowski * Unmap a set of streaming mode DMA translations. Again, CPU access 16234ce63fcdSMarek Szyprowski * rules concerning calls here are the same as for dma_unmap_single(). 16244ce63fcdSMarek Szyprowski */ 16254ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 16264ce63fcdSMarek Szyprowski enum dma_data_direction dir, struct dma_attrs *attrs) 16274ce63fcdSMarek Szyprowski { 16280fa478dfSRob Herring __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 16294ce63fcdSMarek Szyprowski } 16304ce63fcdSMarek Szyprowski 16314ce63fcdSMarek Szyprowski /** 16324ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_cpu 16334ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 16344ce63fcdSMarek Szyprowski * @sg: list of buffers 16354ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 16364ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 16374ce63fcdSMarek Szyprowski */ 16384ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 16394ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 16404ce63fcdSMarek Szyprowski { 16414ce63fcdSMarek Szyprowski struct scatterlist *s; 16424ce63fcdSMarek Szyprowski int i; 16434ce63fcdSMarek Szyprowski 16444ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 16454ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 16464ce63fcdSMarek Szyprowski 16474ce63fcdSMarek Szyprowski } 16484ce63fcdSMarek Szyprowski 16494ce63fcdSMarek Szyprowski /** 16504ce63fcdSMarek Szyprowski * arm_iommu_sync_sg_for_device 16514ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 16524ce63fcdSMarek Szyprowski * @sg: list of buffers 16534ce63fcdSMarek Szyprowski * @nents: number of buffers to map (returned from dma_map_sg) 16544ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as was passed to dma_map_sg) 16554ce63fcdSMarek Szyprowski */ 16564ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 16574ce63fcdSMarek Szyprowski int nents, enum dma_data_direction dir) 16584ce63fcdSMarek Szyprowski { 16594ce63fcdSMarek Szyprowski struct scatterlist *s; 16604ce63fcdSMarek Szyprowski int i; 16614ce63fcdSMarek Szyprowski 16624ce63fcdSMarek Szyprowski for_each_sg(sg, s, nents, i) 16634ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 16644ce63fcdSMarek Szyprowski } 16654ce63fcdSMarek Szyprowski 16664ce63fcdSMarek Szyprowski 16674ce63fcdSMarek Szyprowski /** 16680fa478dfSRob Herring * arm_coherent_iommu_map_page 
16690fa478dfSRob Herring * @dev: valid struct device pointer 16700fa478dfSRob Herring * @page: page that buffer resides in 16710fa478dfSRob Herring * @offset: offset into page for start of buffer 16720fa478dfSRob Herring * @size: size of buffer to map 16730fa478dfSRob Herring * @dir: DMA transfer direction 16740fa478dfSRob Herring * 16750fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_map_page() 16760fa478dfSRob Herring */ 16770fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 16780fa478dfSRob Herring unsigned long offset, size_t size, enum dma_data_direction dir, 16790fa478dfSRob Herring struct dma_attrs *attrs) 16800fa478dfSRob Herring { 16810fa478dfSRob Herring struct dma_iommu_mapping *mapping = dev->archdata.mapping; 16820fa478dfSRob Herring dma_addr_t dma_addr; 168313987d68SWill Deacon int ret, prot, len = PAGE_ALIGN(size + offset); 16840fa478dfSRob Herring 16850fa478dfSRob Herring dma_addr = __alloc_iova(mapping, len); 16860fa478dfSRob Herring if (dma_addr == DMA_ERROR_CODE) 16870fa478dfSRob Herring return dma_addr; 16880fa478dfSRob Herring 1689c9b24996SAndreas Herrmann prot = __dma_direction_to_prot(dir); 169013987d68SWill Deacon 169113987d68SWill Deacon ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 16920fa478dfSRob Herring if (ret < 0) 16930fa478dfSRob Herring goto fail; 16940fa478dfSRob Herring 16950fa478dfSRob Herring return dma_addr + offset; 16960fa478dfSRob Herring fail: 16970fa478dfSRob Herring __free_iova(mapping, dma_addr, len); 16980fa478dfSRob Herring return DMA_ERROR_CODE; 16990fa478dfSRob Herring } 17000fa478dfSRob Herring 17010fa478dfSRob Herring /** 17024ce63fcdSMarek Szyprowski * arm_iommu_map_page 17034ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17044ce63fcdSMarek Szyprowski * @page: page that buffer resides in 17054ce63fcdSMarek Szyprowski * @offset: offset into page for start of buffer 17064ce63fcdSMarek Szyprowski * @size: size of buffer to map 17074ce63fcdSMarek Szyprowski * @dir: DMA transfer direction 17084ce63fcdSMarek Szyprowski * 17094ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_map_page() 17104ce63fcdSMarek Szyprowski */ 17114ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 17124ce63fcdSMarek Szyprowski unsigned long offset, size_t size, enum dma_data_direction dir, 17134ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 17144ce63fcdSMarek Szyprowski { 17150fa478dfSRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 17164ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir); 17174ce63fcdSMarek Szyprowski 17180fa478dfSRob Herring return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); 17190fa478dfSRob Herring } 17204ce63fcdSMarek Szyprowski 17210fa478dfSRob Herring /** 17220fa478dfSRob Herring * arm_coherent_iommu_unmap_page 17230fa478dfSRob Herring * @dev: valid struct device pointer 17240fa478dfSRob Herring * @handle: DMA address of buffer 17250fa478dfSRob Herring * @size: size of buffer (same as passed to dma_map_page) 17260fa478dfSRob Herring * @dir: DMA transfer direction (same as passed to dma_map_page) 17270fa478dfSRob Herring * 17280fa478dfSRob Herring * Coherent IOMMU aware version of arm_dma_unmap_page() 17290fa478dfSRob Herring */ 17300fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 17310fa478dfSRob Herring size_t size, enum dma_data_direction dir, 17320fa478dfSRob Herring 
struct dma_attrs *attrs) 17330fa478dfSRob Herring { 17340fa478dfSRob Herring struct dma_iommu_mapping *mapping = dev->archdata.mapping; 17350fa478dfSRob Herring dma_addr_t iova = handle & PAGE_MASK; 17360fa478dfSRob Herring int offset = handle & ~PAGE_MASK; 17370fa478dfSRob Herring int len = PAGE_ALIGN(size + offset); 17384ce63fcdSMarek Szyprowski 17390fa478dfSRob Herring if (!iova) 17400fa478dfSRob Herring return; 17410fa478dfSRob Herring 17420fa478dfSRob Herring iommu_unmap(mapping->domain, iova, len); 17430fa478dfSRob Herring __free_iova(mapping, iova, len); 17444ce63fcdSMarek Szyprowski } 17454ce63fcdSMarek Szyprowski 17464ce63fcdSMarek Szyprowski /** 17474ce63fcdSMarek Szyprowski * arm_iommu_unmap_page 17484ce63fcdSMarek Szyprowski * @dev: valid struct device pointer 17494ce63fcdSMarek Szyprowski * @handle: DMA address of buffer 17504ce63fcdSMarek Szyprowski * @size: size of buffer (same as passed to dma_map_page) 17514ce63fcdSMarek Szyprowski * @dir: DMA transfer direction (same as passed to dma_map_page) 17524ce63fcdSMarek Szyprowski * 17534ce63fcdSMarek Szyprowski * IOMMU aware version of arm_dma_unmap_page() 17544ce63fcdSMarek Szyprowski */ 17554ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 17564ce63fcdSMarek Szyprowski size_t size, enum dma_data_direction dir, 17574ce63fcdSMarek Szyprowski struct dma_attrs *attrs) 17584ce63fcdSMarek Szyprowski { 17594ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 17604ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 17614ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 17624ce63fcdSMarek Szyprowski int offset = handle & ~PAGE_MASK; 17634ce63fcdSMarek Szyprowski int len = PAGE_ALIGN(size + offset); 17644ce63fcdSMarek Szyprowski 17654ce63fcdSMarek Szyprowski if (!iova) 17664ce63fcdSMarek Szyprowski return; 17674ce63fcdSMarek Szyprowski 17680fa478dfSRob Herring if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 17694ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 17704ce63fcdSMarek Szyprowski 17714ce63fcdSMarek Szyprowski iommu_unmap(mapping->domain, iova, len); 17724ce63fcdSMarek Szyprowski __free_iova(mapping, iova, len); 17734ce63fcdSMarek Szyprowski } 17744ce63fcdSMarek Szyprowski 17754ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev, 17764ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 17774ce63fcdSMarek Szyprowski { 17784ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 17794ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 17804ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 17814ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK; 17824ce63fcdSMarek Szyprowski 17834ce63fcdSMarek Szyprowski if (!iova) 17844ce63fcdSMarek Szyprowski return; 17854ce63fcdSMarek Szyprowski 17864ce63fcdSMarek Szyprowski __dma_page_dev_to_cpu(page, offset, size, dir); 17874ce63fcdSMarek Szyprowski } 17884ce63fcdSMarek Szyprowski 17894ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev, 17904ce63fcdSMarek Szyprowski dma_addr_t handle, size_t size, enum dma_data_direction dir) 17914ce63fcdSMarek Szyprowski { 17924ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = dev->archdata.mapping; 17934ce63fcdSMarek Szyprowski dma_addr_t iova = handle & PAGE_MASK; 
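/* Translate the IOVA back through the IOMMU page tables so cache maintenance is performed on the right physical page. */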
17944ce63fcdSMarek Szyprowski struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
17954ce63fcdSMarek Szyprowski unsigned int offset = handle & ~PAGE_MASK;
17964ce63fcdSMarek Szyprowski
17974ce63fcdSMarek Szyprowski if (!iova)
17984ce63fcdSMarek Szyprowski return;
17994ce63fcdSMarek Szyprowski
18004ce63fcdSMarek Szyprowski __dma_page_cpu_to_dev(page, offset, size, dir);
18014ce63fcdSMarek Szyprowski }
18024ce63fcdSMarek Szyprowski
18034ce63fcdSMarek Szyprowski struct dma_map_ops iommu_ops = {
18044ce63fcdSMarek Szyprowski .alloc = arm_iommu_alloc_attrs,
18054ce63fcdSMarek Szyprowski .free = arm_iommu_free_attrs,
18064ce63fcdSMarek Szyprowski .mmap = arm_iommu_mmap_attrs,
1807dc2832e1SMarek Szyprowski .get_sgtable = arm_iommu_get_sgtable,
18084ce63fcdSMarek Szyprowski
18094ce63fcdSMarek Szyprowski .map_page = arm_iommu_map_page,
18104ce63fcdSMarek Szyprowski .unmap_page = arm_iommu_unmap_page,
18114ce63fcdSMarek Szyprowski .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
18124ce63fcdSMarek Szyprowski .sync_single_for_device = arm_iommu_sync_single_for_device,
18134ce63fcdSMarek Szyprowski
18144ce63fcdSMarek Szyprowski .map_sg = arm_iommu_map_sg,
18154ce63fcdSMarek Szyprowski .unmap_sg = arm_iommu_unmap_sg,
18164ce63fcdSMarek Szyprowski .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
18174ce63fcdSMarek Szyprowski .sync_sg_for_device = arm_iommu_sync_sg_for_device,
1818d09e1333SHiroshi Doyu
1819d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask,
18204ce63fcdSMarek Szyprowski };
18214ce63fcdSMarek Szyprowski
18220fa478dfSRob Herring struct dma_map_ops iommu_coherent_ops = {
18230fa478dfSRob Herring .alloc = arm_iommu_alloc_attrs,
18240fa478dfSRob Herring .free = arm_iommu_free_attrs,
18250fa478dfSRob Herring .mmap = arm_iommu_mmap_attrs,
18260fa478dfSRob Herring .get_sgtable = arm_iommu_get_sgtable,
18270fa478dfSRob Herring
18280fa478dfSRob Herring .map_page = arm_coherent_iommu_map_page,
18290fa478dfSRob Herring .unmap_page = arm_coherent_iommu_unmap_page,
18300fa478dfSRob Herring
18310fa478dfSRob Herring .map_sg = arm_coherent_iommu_map_sg,
18320fa478dfSRob Herring .unmap_sg = arm_coherent_iommu_unmap_sg,
1833d09e1333SHiroshi Doyu
1834d09e1333SHiroshi Doyu .set_dma_mask = arm_dma_set_mask,
18350fa478dfSRob Herring };
18360fa478dfSRob Herring
18374ce63fcdSMarek Szyprowski /**
18384ce63fcdSMarek Szyprowski * arm_iommu_create_mapping
18394ce63fcdSMarek Szyprowski * @bus: pointer to the bus holding the client device (for IOMMU calls)
18404ce63fcdSMarek Szyprowski * @base: start address of the valid IO address space
184168efd7d2SMarek Szyprowski * @size: maximum size of the valid IO address space
18424ce63fcdSMarek Szyprowski *
18434ce63fcdSMarek Szyprowski * Creates a mapping structure which holds information about used/unused
18444ce63fcdSMarek Szyprowski * IO address ranges, which is required to perform memory allocation and
18454ce63fcdSMarek Szyprowski * mapping with IOMMU aware functions.
18464ce63fcdSMarek Szyprowski *
18474ce63fcdSMarek Szyprowski * The client device needs to be attached to the mapping with the
18484ce63fcdSMarek Szyprowski * arm_iommu_attach_device() function.
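*
* Illustrative call sequence (the bus, base address and size are
* hypothetical):
*
*	mapping = arm_iommu_create_mapping(&platform_bus_type,
*					   0x80000000, SZ_128M);
*	if (!IS_ERR(mapping))
*		err = arm_iommu_attach_device(dev, mapping);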
18494ce63fcdSMarek Szyprowski */ 18504ce63fcdSMarek Szyprowski struct dma_iommu_mapping * 185168efd7d2SMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size) 18524ce63fcdSMarek Szyprowski { 185368efd7d2SMarek Szyprowski unsigned int bits = size >> PAGE_SHIFT; 185468efd7d2SMarek Szyprowski unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 18554ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping; 185668efd7d2SMarek Szyprowski int extensions = 1; 18574ce63fcdSMarek Szyprowski int err = -ENOMEM; 18584ce63fcdSMarek Szyprowski 185968efd7d2SMarek Szyprowski if (!bitmap_size) 18604ce63fcdSMarek Szyprowski return ERR_PTR(-EINVAL); 18614ce63fcdSMarek Szyprowski 186268efd7d2SMarek Szyprowski if (bitmap_size > PAGE_SIZE) { 186368efd7d2SMarek Szyprowski extensions = bitmap_size / PAGE_SIZE; 186468efd7d2SMarek Szyprowski bitmap_size = PAGE_SIZE; 186568efd7d2SMarek Szyprowski } 186668efd7d2SMarek Szyprowski 18674ce63fcdSMarek Szyprowski mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 18684ce63fcdSMarek Szyprowski if (!mapping) 18694ce63fcdSMarek Szyprowski goto err; 18704ce63fcdSMarek Szyprowski 187168efd7d2SMarek Szyprowski mapping->bitmap_size = bitmap_size; 187268efd7d2SMarek Szyprowski mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *), 18734d852ef8SAndreas Herrmann GFP_KERNEL); 18744d852ef8SAndreas Herrmann if (!mapping->bitmaps) 18754ce63fcdSMarek Szyprowski goto err2; 18764ce63fcdSMarek Szyprowski 187768efd7d2SMarek Szyprowski mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); 18784d852ef8SAndreas Herrmann if (!mapping->bitmaps[0]) 18794d852ef8SAndreas Herrmann goto err3; 18804d852ef8SAndreas Herrmann 18814d852ef8SAndreas Herrmann mapping->nr_bitmaps = 1; 18824d852ef8SAndreas Herrmann mapping->extensions = extensions; 18834ce63fcdSMarek Szyprowski mapping->base = base; 188468efd7d2SMarek Szyprowski mapping->bits = BITS_PER_BYTE * bitmap_size; 18854d852ef8SAndreas Herrmann 18864ce63fcdSMarek Szyprowski spin_lock_init(&mapping->lock); 18874ce63fcdSMarek Szyprowski 18884ce63fcdSMarek Szyprowski mapping->domain = iommu_domain_alloc(bus); 18894ce63fcdSMarek Szyprowski if (!mapping->domain) 18904d852ef8SAndreas Herrmann goto err4; 18914ce63fcdSMarek Szyprowski 18924ce63fcdSMarek Szyprowski kref_init(&mapping->kref); 18934ce63fcdSMarek Szyprowski return mapping; 18944d852ef8SAndreas Herrmann err4: 18954d852ef8SAndreas Herrmann kfree(mapping->bitmaps[0]); 18964ce63fcdSMarek Szyprowski err3: 18974d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 18984ce63fcdSMarek Szyprowski err2: 18994ce63fcdSMarek Szyprowski kfree(mapping); 19004ce63fcdSMarek Szyprowski err: 19014ce63fcdSMarek Szyprowski return ERR_PTR(err); 19024ce63fcdSMarek Szyprowski } 190318177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 19044ce63fcdSMarek Szyprowski 19054ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref) 19064ce63fcdSMarek Szyprowski { 19074d852ef8SAndreas Herrmann int i; 19084ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping = 19094ce63fcdSMarek Szyprowski container_of(kref, struct dma_iommu_mapping, kref); 19104ce63fcdSMarek Szyprowski 19114ce63fcdSMarek Szyprowski iommu_domain_free(mapping->domain); 19124d852ef8SAndreas Herrmann for (i = 0; i < mapping->nr_bitmaps; i++) 19134d852ef8SAndreas Herrmann kfree(mapping->bitmaps[i]); 19144d852ef8SAndreas Herrmann kfree(mapping->bitmaps); 19154ce63fcdSMarek Szyprowski kfree(mapping); 19164ce63fcdSMarek Szyprowski } 19174ce63fcdSMarek 
Szyprowski
19184d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
19194d852ef8SAndreas Herrmann {
19204d852ef8SAndreas Herrmann int next_bitmap;
19214d852ef8SAndreas Herrmann
19224d852ef8SAndreas Herrmann if (mapping->nr_bitmaps >= mapping->extensions)
19234d852ef8SAndreas Herrmann return -EINVAL;
19244d852ef8SAndreas Herrmann
19254d852ef8SAndreas Herrmann next_bitmap = mapping->nr_bitmaps;
19264d852ef8SAndreas Herrmann mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
19274d852ef8SAndreas Herrmann GFP_ATOMIC);
19284d852ef8SAndreas Herrmann if (!mapping->bitmaps[next_bitmap])
19294d852ef8SAndreas Herrmann return -ENOMEM;
19304d852ef8SAndreas Herrmann
19314d852ef8SAndreas Herrmann mapping->nr_bitmaps++;
19324d852ef8SAndreas Herrmann
19334d852ef8SAndreas Herrmann return 0;
19344d852ef8SAndreas Herrmann }
19354d852ef8SAndreas Herrmann
19364ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
19374ce63fcdSMarek Szyprowski {
19384ce63fcdSMarek Szyprowski if (mapping)
19394ce63fcdSMarek Szyprowski kref_put(&mapping->kref, release_iommu_mapping);
19404ce63fcdSMarek Szyprowski }
194118177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
19424ce63fcdSMarek Szyprowski
19434ce63fcdSMarek Szyprowski /**
19444ce63fcdSMarek Szyprowski * arm_iommu_attach_device
19454ce63fcdSMarek Szyprowski * @dev: valid struct device pointer
19464ce63fcdSMarek Szyprowski * @mapping: io address space mapping structure (returned from
19474ce63fcdSMarek Szyprowski * arm_iommu_create_mapping)
19484ce63fcdSMarek Szyprowski *
19494ce63fcdSMarek Szyprowski * Attaches the specified io address space mapping to the provided device.
19504bb25789SWill Deacon * More than one client might be attached to the same io address space
19514bb25789SWill Deacon * mapping.
19524ce63fcdSMarek Szyprowski */
19534ce63fcdSMarek Szyprowski int arm_iommu_attach_device(struct device *dev,
19544ce63fcdSMarek Szyprowski struct dma_iommu_mapping *mapping)
19554ce63fcdSMarek Szyprowski {
19564ce63fcdSMarek Szyprowski int err;
19574ce63fcdSMarek Szyprowski
19584ce63fcdSMarek Szyprowski err = iommu_attach_device(mapping->domain, dev);
19594ce63fcdSMarek Szyprowski if (err)
19604ce63fcdSMarek Szyprowski return err;
19614ce63fcdSMarek Szyprowski
19624ce63fcdSMarek Szyprowski kref_get(&mapping->kref);
19634ce63fcdSMarek Szyprowski dev->archdata.mapping = mapping;
19644ce63fcdSMarek Szyprowski
196575c59716SHiroshi Doyu pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
19664ce63fcdSMarek Szyprowski return 0;
19674ce63fcdSMarek Szyprowski }
196818177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
19694ce63fcdSMarek Szyprowski
19706fe36758SHiroshi Doyu /**
19716fe36758SHiroshi Doyu * arm_iommu_detach_device
19726fe36758SHiroshi Doyu * @dev: valid struct device pointer
19736fe36758SHiroshi Doyu *
19746fe36758SHiroshi Doyu * Detaches the provided device from a previously attached map.
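* The mapping itself is only freed once the last reference to it is
* dropped (see release_iommu_mapping()).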
19756fe36758SHiroshi Doyu */
19766fe36758SHiroshi Doyu void arm_iommu_detach_device(struct device *dev)
19776fe36758SHiroshi Doyu {
19786fe36758SHiroshi Doyu struct dma_iommu_mapping *mapping;
19796fe36758SHiroshi Doyu
19806fe36758SHiroshi Doyu mapping = to_dma_iommu_mapping(dev);
19816fe36758SHiroshi Doyu if (!mapping) {
19826fe36758SHiroshi Doyu dev_warn(dev, "Not attached\n");
19836fe36758SHiroshi Doyu return;
19846fe36758SHiroshi Doyu }
19856fe36758SHiroshi Doyu
19866fe36758SHiroshi Doyu iommu_detach_device(mapping->domain, dev);
19876fe36758SHiroshi Doyu kref_put(&mapping->kref, release_iommu_mapping);
19889e4b259dSWill Deacon dev->archdata.mapping = NULL;
19896fe36758SHiroshi Doyu
19906fe36758SHiroshi Doyu pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
19916fe36758SHiroshi Doyu }
199218177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
19936fe36758SHiroshi Doyu
19944bb25789SWill Deacon static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
19954bb25789SWill Deacon {
19964bb25789SWill Deacon return coherent ? &iommu_coherent_ops : &iommu_ops;
19974bb25789SWill Deacon }
19984bb25789SWill Deacon
19994bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
20004bb25789SWill Deacon struct iommu_ops *iommu)
20014bb25789SWill Deacon {
20024bb25789SWill Deacon struct dma_iommu_mapping *mapping;
20034bb25789SWill Deacon
20044bb25789SWill Deacon if (!iommu)
20054bb25789SWill Deacon return false;
20064bb25789SWill Deacon
20074bb25789SWill Deacon mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
20084bb25789SWill Deacon if (IS_ERR(mapping)) {
20094bb25789SWill Deacon pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
20104bb25789SWill Deacon size, dev_name(dev));
20114bb25789SWill Deacon return false;
20124bb25789SWill Deacon }
20134bb25789SWill Deacon
20144bb25789SWill Deacon if (arm_iommu_attach_device(dev, mapping)) {
20154bb25789SWill Deacon pr_warn("Failed to attach device %s to IOMMU mapping\n",
20164bb25789SWill Deacon dev_name(dev));
20174bb25789SWill Deacon arm_iommu_release_mapping(mapping);
20184bb25789SWill Deacon return false;
20194bb25789SWill Deacon }
20204bb25789SWill Deacon
20214bb25789SWill Deacon return true;
20224bb25789SWill Deacon }
20234bb25789SWill Deacon
20244bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev)
20254bb25789SWill Deacon {
20264bb25789SWill Deacon struct dma_iommu_mapping *mapping = dev->archdata.mapping;
20274bb25789SWill Deacon
2028c2273a18SWill Deacon if (!mapping)
2029c2273a18SWill Deacon return;
2030c2273a18SWill Deacon
20314bb25789SWill Deacon arm_iommu_detach_device(dev);
20324bb25789SWill Deacon arm_iommu_release_mapping(mapping);
20334bb25789SWill Deacon }
20344bb25789SWill Deacon
20354bb25789SWill Deacon #else
20364bb25789SWill Deacon
20374bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
20384bb25789SWill Deacon struct iommu_ops *iommu)
20394bb25789SWill Deacon {
20404bb25789SWill Deacon return false;
20414bb25789SWill Deacon }
20424bb25789SWill Deacon
20434bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { }
20444bb25789SWill Deacon
20454bb25789SWill Deacon #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
20464bb25789SWill Deacon
20474bb25789SWill Deacon #endif /* CONFIG_ARM_DMA_USE_IOMMU */
20484bb25789SWill Deacon
20494bb25789SWill Deacon static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
20504bb25789SWill Deacon { 20514bb25789SWill Deacon return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; 20524bb25789SWill Deacon } 20534bb25789SWill Deacon 20544bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 20554bb25789SWill Deacon struct iommu_ops *iommu, bool coherent) 20564bb25789SWill Deacon { 20574bb25789SWill Deacon struct dma_map_ops *dma_ops; 20584bb25789SWill Deacon 20596f51ee70SLinus Torvalds dev->archdata.dma_coherent = coherent; 20604bb25789SWill Deacon if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) 20614bb25789SWill Deacon dma_ops = arm_get_iommu_dma_map_ops(coherent); 20624bb25789SWill Deacon else 20634bb25789SWill Deacon dma_ops = arm_get_dma_map_ops(coherent); 20644bb25789SWill Deacon 20654bb25789SWill Deacon set_dma_ops(dev, dma_ops); 20664bb25789SWill Deacon } 20674bb25789SWill Deacon 20684bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev) 20694bb25789SWill Deacon { 20704bb25789SWill Deacon arm_teardown_iommu_dma_ops(dev); 20714bb25789SWill Deacon } 2072
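
#if 0	/* Illustrative sketch only: how bus code might select DMA ops for a
	 * new, non-coherent device. The 'iommu' argument is hypothetical and
	 * would come from the firmware/bus description. */
static void example_configure_device_dma(struct device *dev,
					 struct iommu_ops *iommu)
{
	/* IOMMU-backed ops are chosen if an IOMMU is present, the plain
	 * arm_dma_ops otherwise; SZ_1G is an arbitrary example window. */
	arch_setup_dma_ops(dev, 0, SZ_1G, iommu, false);
}
#endif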