/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
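
/*
 * Illustrative sketch (not part of this file): the ownership protocol as
 * seen from a driver using the streaming API.  "my_dev", "my_page" and
 * "len" are hypothetical names.
 *
 *	dma_addr_t dma = dma_map_page(my_dev, my_page, 0, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(my_dev, dma))
 *		return -ENOMEM;
 *	// device owns the buffer: start the transfer, wait for completion
 *	dma_unmap_page(my_dev, dma, len, DMA_FROM_DEVICE);
 *	// CPU owns the buffer again and sees what the device wrote
 */
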
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
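
/*
 * Illustrative note (an assumption about the generic wiring, not something
 * defined in this file): driver-facing calls such as dma_map_page() are
 * inline wrappers that look up the device's ops table and dispatch to the
 * handlers installed above, roughly:
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t dma = ops->map_page(dev, page, offset, size, dir, attrs);
 *
 * so on ARM a streaming map normally ends up in arm_dma_map_page().
 */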

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
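
/*
 * Worked example (illustration only): with 4 KiB pages, a 12 KiB request
 * gives get_order(12K) == 2, so alloc_pages() returns a four-page block.
 * split_page() turns it into four order-0 pages, and the loop above frees
 * the single page beyond size >> PAGE_SHIFT == 3, leaving exactly the
 * three pages that back the buffer.
 */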

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocations can be mapped to user space, so let's
	 * set the VM_USERMAP flag too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};

static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
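
/*
 * Usage example (illustration): booting with "coherent_pool=1M" on the
 * kernel command line makes memparse() above set atomic_pool.size to
 * 1 MiB before atomic_pool_init() runs.
 */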
322c7909509SMarek Szyprowski 
3236e5267aaSMarek Szyprowski void __init init_dma_coherent_pool_size(unsigned long size)
3246e5267aaSMarek Szyprowski {
3256e5267aaSMarek Szyprowski 	/*
3266e5267aaSMarek Szyprowski 	 * Catch any attempt to set the pool size too late.
3276e5267aaSMarek Szyprowski 	 */
3286e5267aaSMarek Szyprowski 	BUG_ON(atomic_pool.vaddr);
3296e5267aaSMarek Szyprowski 
3306e5267aaSMarek Szyprowski 	/*
3316e5267aaSMarek Szyprowski 	 * Set architecture specific coherent pool size only if
3326e5267aaSMarek Szyprowski 	 * it has not been changed by kernel command line parameter.
3336e5267aaSMarek Szyprowski 	 */
3346e5267aaSMarek Szyprowski 	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
3356e5267aaSMarek Szyprowski 		atomic_pool.size = size;
3366e5267aaSMarek Szyprowski }
3376e5267aaSMarek Szyprowski 
338c7909509SMarek Szyprowski /*
339c7909509SMarek Szyprowski  * Initialise the coherent pool for atomic allocations.
340c7909509SMarek Szyprowski  */
341e9da6e99SMarek Szyprowski static int __init atomic_pool_init(void)
342c7909509SMarek Szyprowski {
343e9da6e99SMarek Szyprowski 	struct dma_pool *pool = &atomic_pool;
344c7909509SMarek Szyprowski 	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
345e9da6e99SMarek Szyprowski 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
346e9da6e99SMarek Szyprowski 	unsigned long *bitmap;
347c7909509SMarek Szyprowski 	struct page *page;
3486b3fe472SHiroshi Doyu 	struct page **pages;
349c7909509SMarek Szyprowski 	void *ptr;
350e9da6e99SMarek Szyprowski 	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
351c7909509SMarek Szyprowski 
352e9da6e99SMarek Szyprowski 	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
353e9da6e99SMarek Szyprowski 	if (!bitmap)
354e9da6e99SMarek Szyprowski 		goto no_bitmap;
355c7909509SMarek Szyprowski 
3566b3fe472SHiroshi Doyu 	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
3576b3fe472SHiroshi Doyu 	if (!pages)
3586b3fe472SHiroshi Doyu 		goto no_pages;
3596b3fe472SHiroshi Doyu 
360e9da6e99SMarek Szyprowski 	if (IS_ENABLED(CONFIG_CMA))
3619848e48fSMarek Szyprowski 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
3629848e48fSMarek Szyprowski 					      atomic_pool_init);
363e9da6e99SMarek Szyprowski 	else
364e9da6e99SMarek Szyprowski 		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
3659848e48fSMarek Szyprowski 					   &page, atomic_pool_init);
366c7909509SMarek Szyprowski 	if (ptr) {
3676b3fe472SHiroshi Doyu 		int i;
3686b3fe472SHiroshi Doyu 
3696b3fe472SHiroshi Doyu 		for (i = 0; i < nr_pages; i++)
3706b3fe472SHiroshi Doyu 			pages[i] = page + i;
3716b3fe472SHiroshi Doyu 
372e9da6e99SMarek Szyprowski 		spin_lock_init(&pool->lock);
373e9da6e99SMarek Szyprowski 		pool->vaddr = ptr;
3746b3fe472SHiroshi Doyu 		pool->pages = pages;
375e9da6e99SMarek Szyprowski 		pool->bitmap = bitmap;
376e9da6e99SMarek Szyprowski 		pool->nr_pages = nr_pages;
377e9da6e99SMarek Szyprowski 		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
378e9da6e99SMarek Szyprowski 		       (unsigned)pool->size / 1024);
379c7909509SMarek Szyprowski 		return 0;
380c7909509SMarek Szyprowski 	}
381ec10665cSSachin Kamat 
382ec10665cSSachin Kamat 	kfree(pages);
3836b3fe472SHiroshi Doyu no_pages:
384e9da6e99SMarek Szyprowski 	kfree(bitmap);
385e9da6e99SMarek Szyprowski no_bitmap:
386e9da6e99SMarek Szyprowski 	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
387e9da6e99SMarek Szyprowski 	       (unsigned)pool->size / 1024);
388c7909509SMarek Szyprowski 	return -ENOMEM;
389c7909509SMarek Szyprowski }
390c7909509SMarek Szyprowski /*
391c7909509SMarek Szyprowski  * CMA is activated by core_initcall, so we must be called after it.
392c7909509SMarek Szyprowski  */
393e9da6e99SMarek Szyprowski postcore_initcall(atomic_pool_init);
394c7909509SMarek Szyprowski 
395c7909509SMarek Szyprowski struct dma_contig_early_reserve {
396c7909509SMarek Szyprowski 	phys_addr_t base;
397c7909509SMarek Szyprowski 	unsigned long size;
398c7909509SMarek Szyprowski };
399c7909509SMarek Szyprowski 
400c7909509SMarek Szyprowski static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
401c7909509SMarek Szyprowski 
402c7909509SMarek Szyprowski static int dma_mmu_remap_num __initdata;
403c7909509SMarek Szyprowski 
404c7909509SMarek Szyprowski void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
405c7909509SMarek Szyprowski {
406c7909509SMarek Szyprowski 	dma_mmu_remap[dma_mmu_remap_num].base = base;
407c7909509SMarek Szyprowski 	dma_mmu_remap[dma_mmu_remap_num].size = size;
408c7909509SMarek Szyprowski 	dma_mmu_remap_num++;
409c7909509SMarek Szyprowski }
410c7909509SMarek Szyprowski 
411c7909509SMarek Szyprowski void __init dma_contiguous_remap(void)
412c7909509SMarek Szyprowski {
413c7909509SMarek Szyprowski 	int i;
414c7909509SMarek Szyprowski 	for (i = 0; i < dma_mmu_remap_num; i++) {
415c7909509SMarek Szyprowski 		phys_addr_t start = dma_mmu_remap[i].base;
416c7909509SMarek Szyprowski 		phys_addr_t end = start + dma_mmu_remap[i].size;
417c7909509SMarek Szyprowski 		struct map_desc map;
418c7909509SMarek Szyprowski 		unsigned long addr;
419c7909509SMarek Szyprowski 
420c7909509SMarek Szyprowski 		if (end > arm_lowmem_limit)
421c7909509SMarek Szyprowski 			end = arm_lowmem_limit;
422c7909509SMarek Szyprowski 		if (start >= end)
42339f78e70SChris Brand 			continue;
424c7909509SMarek Szyprowski 
425c7909509SMarek Szyprowski 		map.pfn = __phys_to_pfn(start);
426c7909509SMarek Szyprowski 		map.virtual = __phys_to_virt(start);
427c7909509SMarek Szyprowski 		map.length = end - start;
428c7909509SMarek Szyprowski 		map.type = MT_MEMORY_DMA_READY;
429c7909509SMarek Szyprowski 
430c7909509SMarek Szyprowski 		/*
431c7909509SMarek Szyprowski 		 * Clear previous low-memory mapping
432c7909509SMarek Szyprowski 		 */
433c7909509SMarek Szyprowski 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
43461f6c7a4SVitaly Andrianov 		     addr += PMD_SIZE)
435c7909509SMarek Szyprowski 			pmd_clear(pmd_off_k(addr));
436c7909509SMarek Szyprowski 
437c7909509SMarek Szyprowski 		iotable_init(&map, 1);
438c7909509SMarek Szyprowski 	}
439c7909509SMarek Szyprowski }
440c7909509SMarek Szyprowski 
441c7909509SMarek Szyprowski static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
442c7909509SMarek Szyprowski 			    void *data)
443c7909509SMarek Szyprowski {
444c7909509SMarek Szyprowski 	struct page *page = virt_to_page(addr);
445c7909509SMarek Szyprowski 	pgprot_t prot = *(pgprot_t *)data;
446c7909509SMarek Szyprowski 
447c7909509SMarek Szyprowski 	set_pte_ext(pte, mk_pte(page, prot), 0);
448c7909509SMarek Szyprowski 	return 0;
449c7909509SMarek Szyprowski }
450c7909509SMarek Szyprowski 
451c7909509SMarek Szyprowski static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
452c7909509SMarek Szyprowski {
453c7909509SMarek Szyprowski 	unsigned long start = (unsigned long) page_address(page);
454c7909509SMarek Szyprowski 	unsigned end = start + size;
455c7909509SMarek Szyprowski 
456c7909509SMarek Szyprowski 	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
457c7909509SMarek Szyprowski 	dsb();
458c7909509SMarek Szyprowski 	flush_tlb_kernel_range(start, end);
459c7909509SMarek Szyprowski }
460c7909509SMarek Szyprowski 
461c7909509SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
462c7909509SMarek Szyprowski 				 pgprot_t prot, struct page **ret_page,
463c7909509SMarek Szyprowski 				 const void *caller)
464c7909509SMarek Szyprowski {
465c7909509SMarek Szyprowski 	struct page *page;
466c7909509SMarek Szyprowski 	void *ptr;
467c7909509SMarek Szyprowski 	page = __dma_alloc_buffer(dev, size, gfp);
468c7909509SMarek Szyprowski 	if (!page)
469c7909509SMarek Szyprowski 		return NULL;
470c7909509SMarek Szyprowski 
471c7909509SMarek Szyprowski 	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
472c7909509SMarek Szyprowski 	if (!ptr) {
473c7909509SMarek Szyprowski 		__dma_free_buffer(page, size);
474c7909509SMarek Szyprowski 		return NULL;
475c7909509SMarek Szyprowski 	}
476c7909509SMarek Szyprowski 
477c7909509SMarek Szyprowski 	*ret_page = page;
478c7909509SMarek Szyprowski 	return ptr;
479c7909509SMarek Szyprowski }
480c7909509SMarek Szyprowski 
481e9da6e99SMarek Szyprowski static void *__alloc_from_pool(size_t size, struct page **ret_page)
482c7909509SMarek Szyprowski {
483e9da6e99SMarek Szyprowski 	struct dma_pool *pool = &atomic_pool;
484e9da6e99SMarek Szyprowski 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
485e9da6e99SMarek Szyprowski 	unsigned int pageno;
486e9da6e99SMarek Szyprowski 	unsigned long flags;
487e9da6e99SMarek Szyprowski 	void *ptr = NULL;
488e4ea6918SAaro Koskinen 	unsigned long align_mask;
489c7909509SMarek Szyprowski 
490e9da6e99SMarek Szyprowski 	if (!pool->vaddr) {
491e9da6e99SMarek Szyprowski 		WARN(1, "coherent pool not initialised!\n");
492c7909509SMarek Szyprowski 		return NULL;
493c7909509SMarek Szyprowski 	}
494c7909509SMarek Szyprowski 
495c7909509SMarek Szyprowski 	/*
496c7909509SMarek Szyprowski 	 * Align the region allocation - allocations from pool are rather
497c7909509SMarek Szyprowski 	 * small, so align them to their order in pages, minimum is a page
498c7909509SMarek Szyprowski 	 * size. This helps reduce fragmentation of the DMA space.
499c7909509SMarek Szyprowski 	 */
500e4ea6918SAaro Koskinen 	align_mask = (1 << get_order(size)) - 1;
501e9da6e99SMarek Szyprowski 
502e9da6e99SMarek Szyprowski 	spin_lock_irqsave(&pool->lock, flags);
503e9da6e99SMarek Szyprowski 	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
504e4ea6918SAaro Koskinen 					    0, count, align_mask);
505e9da6e99SMarek Szyprowski 	if (pageno < pool->nr_pages) {
506e9da6e99SMarek Szyprowski 		bitmap_set(pool->bitmap, pageno, count);
507e9da6e99SMarek Szyprowski 		ptr = pool->vaddr + PAGE_SIZE * pageno;
5086b3fe472SHiroshi Doyu 		*ret_page = pool->pages[pageno];
509fb71285fSMarek Szyprowski 	} else {
510fb71285fSMarek Szyprowski 		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
511fb71285fSMarek Szyprowski 			    "Please increase it with coherent_pool= kernel parameter!\n",
512fb71285fSMarek Szyprowski 			    (unsigned)pool->size / 1024);
513e9da6e99SMarek Szyprowski 	}
514e9da6e99SMarek Szyprowski 	spin_unlock_irqrestore(&pool->lock, flags);
515e9da6e99SMarek Szyprowski 
516c7909509SMarek Szyprowski 	return ptr;
517c7909509SMarek Szyprowski }
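
/*
 * Worked example (illustration only): a 6 KiB request rounds up to
 * count = 2 pages and get_order(6K) == 1, so align_mask == 1 and
 * bitmap_find_next_zero_area() only returns even page indices, i.e.
 * the region is aligned to its own two-page order within the pool.
 */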

static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)	__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}



static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}
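
/*
 * Illustrative sketch (driver side, names hypothetical): the usual entry
 * point into arm_dma_alloc()/arm_dma_free() is the generic coherent API.
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(my_dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	// tell the device about 'dma', use 'cpu' from the kernel
 *	dma_free_coherent(my_dev, SZ_4K, cpu, dma);
 */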

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
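
/*
 * Illustrative sketch (driver side, names hypothetical): a character
 * device's ->mmap() handler can hand a coherent buffer to userspace via
 * the generic wrapper, which reaches arm_dma_mmap() through the ops table:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_dma_handle, my_size);
 *	}
 */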

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
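
/*
 * Illustrative sketch (driver side, names hypothetical): a typical caller
 * maps a scatterlist, programs the device with the returned segments and
 * unmaps once the transfer has completed.  program_descriptor() is an
 * invented stand-in for the device-specific setup.
 *
 *	int n = dma_map_sg(my_dev, my_sgl, my_nents, DMA_TO_DEVICE);
 *	if (n == 0)
 *		return -ENOMEM;
 *	for_each_sg(my_sgl, s, n, i)
 *		program_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	// ... wait for the device ...
 *	dma_unmap_sg(my_dev, my_sgl, my_nents, DMA_TO_DEVICE);
 */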

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
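
/*
 * Illustrative sketch (driver side): a device that can only drive 24
 * address bits would declare that before mapping anything, e.g.
 *
 *	if (dma_set_mask(my_dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 *
 * which (an assumption about the generic wiring, not defined here) is
 * normally routed to arm_dma_set_mask() via the .set_dma_mask hook above.
 */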

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}
10514ce63fcdSMarek Szyprowski 
10524ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping,
10534ce63fcdSMarek Szyprowski 			       dma_addr_t addr, size_t size)
10544ce63fcdSMarek Szyprowski {
10554ce63fcdSMarek Szyprowski 	unsigned int start = (addr - mapping->base) >>
10564ce63fcdSMarek Szyprowski 			     (mapping->order + PAGE_SHIFT);
10574ce63fcdSMarek Szyprowski 	unsigned int count = ((size >> PAGE_SHIFT) +
10584ce63fcdSMarek Szyprowski 			      (1 << mapping->order) - 1) >> mapping->order;
10594ce63fcdSMarek Szyprowski 	unsigned long flags;
10604ce63fcdSMarek Szyprowski 
10614ce63fcdSMarek Szyprowski 	spin_lock_irqsave(&mapping->lock, flags);
10624ce63fcdSMarek Szyprowski 	bitmap_clear(mapping->bitmap, start, count);
10634ce63fcdSMarek Szyprowski 	spin_unlock_irqrestore(&mapping->lock, flags);
10644ce63fcdSMarek Szyprowski }
10654ce63fcdSMarek Szyprowski 
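/*
 * Worked example (illustrative, assuming 4 KiB pages): with
 * mapping->order == 1 the allocator hands out IO addresses in 2-page
 * units.  For a 20 KiB request get_order() returns 3, so align becomes
 * (1 << (3 - 1)) - 1 == 3 (the start unit is rounded to a multiple of
 * 4 units, i.e. 32 KiB alignment relative to mapping->base) and count
 * becomes (5 + 1) >> 1 == 3 units, reserving 24 KiB of IO space.
 */
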
1066549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1067549a17e4SMarek Szyprowski 					  gfp_t gfp, struct dma_attrs *attrs)
10684ce63fcdSMarek Szyprowski {
10694ce63fcdSMarek Szyprowski 	struct page **pages;
10704ce63fcdSMarek Szyprowski 	int count = size >> PAGE_SHIFT;
10714ce63fcdSMarek Szyprowski 	int array_size = count * sizeof(struct page *);
10724ce63fcdSMarek Szyprowski 	int i = 0;
10734ce63fcdSMarek Szyprowski 
10744ce63fcdSMarek Szyprowski 	if (array_size <= PAGE_SIZE)
10754ce63fcdSMarek Szyprowski 		pages = kzalloc(array_size, gfp);
10764ce63fcdSMarek Szyprowski 	else
10774ce63fcdSMarek Szyprowski 		pages = vzalloc(array_size);
10784ce63fcdSMarek Szyprowski 	if (!pages)
10794ce63fcdSMarek Szyprowski 		return NULL;
10804ce63fcdSMarek Szyprowski 
1081549a17e4SMarek Szyprowski 	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
1083549a17e4SMarek Szyprowski 		unsigned long order = get_order(size);
1084549a17e4SMarek Szyprowski 		struct page *page;
1085549a17e4SMarek Szyprowski 
1086549a17e4SMarek Szyprowski 		page = dma_alloc_from_contiguous(dev, count, order);
1087549a17e4SMarek Szyprowski 		if (!page)
1088549a17e4SMarek Szyprowski 			goto error;
1089549a17e4SMarek Szyprowski 
1090549a17e4SMarek Szyprowski 		__dma_clear_buffer(page, size);
1091549a17e4SMarek Szyprowski 
1092549a17e4SMarek Szyprowski 		for (i = 0; i < count; i++)
1093549a17e4SMarek Szyprowski 			pages[i] = page + i;
1094549a17e4SMarek Szyprowski 
1095549a17e4SMarek Szyprowski 		return pages;
1096549a17e4SMarek Szyprowski 	}
1097549a17e4SMarek Szyprowski 
10984ce63fcdSMarek Szyprowski 	while (count) {
1099593f4735SMarek Szyprowski 		int j, order = __fls(count);
11004ce63fcdSMarek Szyprowski 
11014ce63fcdSMarek Szyprowski 		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
11024ce63fcdSMarek Szyprowski 		while (!pages[i] && order)
11034ce63fcdSMarek Szyprowski 			pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
11044ce63fcdSMarek Szyprowski 		if (!pages[i])
11054ce63fcdSMarek Szyprowski 			goto error;
11064ce63fcdSMarek Szyprowski 
11075a796eebSHiroshi Doyu 		if (order) {
11084ce63fcdSMarek Szyprowski 			split_page(pages[i], order);
11094ce63fcdSMarek Szyprowski 			j = 1 << order;
11104ce63fcdSMarek Szyprowski 			while (--j)
11114ce63fcdSMarek Szyprowski 				pages[i + j] = pages[i] + j;
11125a796eebSHiroshi Doyu 		}
11134ce63fcdSMarek Szyprowski 
11144ce63fcdSMarek Szyprowski 		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
11154ce63fcdSMarek Szyprowski 		i += 1 << order;
11164ce63fcdSMarek Szyprowski 		count -= 1 << order;
11174ce63fcdSMarek Szyprowski 	}
11184ce63fcdSMarek Szyprowski 
11194ce63fcdSMarek Szyprowski 	return pages;
11204ce63fcdSMarek Szyprowski error:
11219fa8af91SMarek Szyprowski 	while (i--)
11224ce63fcdSMarek Szyprowski 		if (pages[i])
11234ce63fcdSMarek Szyprowski 			__free_pages(pages[i], 0);
112446c87852SPrathyush K 	if (array_size <= PAGE_SIZE)
11254ce63fcdSMarek Szyprowski 		kfree(pages);
11264ce63fcdSMarek Szyprowski 	else
11274ce63fcdSMarek Szyprowski 		vfree(pages);
11284ce63fcdSMarek Szyprowski 	return NULL;
11294ce63fcdSMarek Szyprowski }
11304ce63fcdSMarek Szyprowski 
1131549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages,
1132549a17e4SMarek Szyprowski 			       size_t size, struct dma_attrs *attrs)
11334ce63fcdSMarek Szyprowski {
11344ce63fcdSMarek Szyprowski 	int count = size >> PAGE_SHIFT;
11354ce63fcdSMarek Szyprowski 	int array_size = count * sizeof(struct page *);
11364ce63fcdSMarek Szyprowski 	int i;
1137549a17e4SMarek Szyprowski 
1138549a17e4SMarek Szyprowski 	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
1139549a17e4SMarek Szyprowski 		dma_release_from_contiguous(dev, pages[0], count);
1140549a17e4SMarek Szyprowski 	} else {
11414ce63fcdSMarek Szyprowski 		for (i = 0; i < count; i++)
11424ce63fcdSMarek Szyprowski 			if (pages[i])
11434ce63fcdSMarek Szyprowski 				__free_pages(pages[i], 0);
1144549a17e4SMarek Szyprowski 	}
1145549a17e4SMarek Szyprowski 
114646c87852SPrathyush K 	if (array_size <= PAGE_SIZE)
11474ce63fcdSMarek Szyprowski 		kfree(pages);
11484ce63fcdSMarek Szyprowski 	else
11494ce63fcdSMarek Szyprowski 		vfree(pages);
11504ce63fcdSMarek Szyprowski 	return 0;
11514ce63fcdSMarek Szyprowski }
11524ce63fcdSMarek Szyprowski 
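/*
 * Illustrative note: __iommu_alloc_buffer() grabs the largest
 * power-of-two chunk that still fits (__fls(count)) and steps down one
 * order at a time under memory pressure; only a failed order-0
 * allocation aborts the whole buffer.  For a 1 MiB (256-page) request
 * it first attempts a single order-8 allocation and, failing that,
 * covers the remainder with progressively smaller chunks.  With
 * DMA_ATTR_FORCE_CONTIGUOUS the whole buffer comes from CMA in one
 * piece instead.
 */
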
11534ce63fcdSMarek Szyprowski /*
11544ce63fcdSMarek Szyprowski  * Create a CPU mapping for the specified pages
11554ce63fcdSMarek Szyprowski  */
11564ce63fcdSMarek Szyprowski static void *
1157e9da6e99SMarek Szyprowski __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1158e9da6e99SMarek Szyprowski 		    const void *caller)
11594ce63fcdSMarek Szyprowski {
1160e9da6e99SMarek Szyprowski 	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1161e9da6e99SMarek Szyprowski 	struct vm_struct *area;
1162e9da6e99SMarek Szyprowski 	unsigned long p;
11634ce63fcdSMarek Szyprowski 
1164e9da6e99SMarek Szyprowski 	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
1165e9da6e99SMarek Szyprowski 				  caller);
1166e9da6e99SMarek Szyprowski 	if (!area)
11674ce63fcdSMarek Szyprowski 		return NULL;
1168e9da6e99SMarek Szyprowski 
1169e9da6e99SMarek Szyprowski 	area->pages = pages;
1170e9da6e99SMarek Szyprowski 	area->nr_pages = nr_pages;
1171e9da6e99SMarek Szyprowski 	p = (unsigned long)area->addr;
1172e9da6e99SMarek Szyprowski 
1173e9da6e99SMarek Szyprowski 	for (i = 0; i < nr_pages; i++) {
1174e9da6e99SMarek Szyprowski 		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
1175e9da6e99SMarek Szyprowski 		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
1176e9da6e99SMarek Szyprowski 			goto err;
1177e9da6e99SMarek Szyprowski 		p += PAGE_SIZE;
11784ce63fcdSMarek Szyprowski 	}
1179e9da6e99SMarek Szyprowski 	return area->addr;
1180e9da6e99SMarek Szyprowski err:
1181e9da6e99SMarek Szyprowski 	unmap_kernel_range((unsigned long)area->addr, size);
1182e9da6e99SMarek Szyprowski 	vunmap(area->addr);
11834ce63fcdSMarek Szyprowski 	return NULL;
11844ce63fcdSMarek Szyprowski }
11854ce63fcdSMarek Szyprowski 
11864ce63fcdSMarek Szyprowski /*
11874ce63fcdSMarek Szyprowski  * Create a mapping in the device's IO address space for the specified pages
11884ce63fcdSMarek Szyprowski  */
11894ce63fcdSMarek Szyprowski static dma_addr_t
11904ce63fcdSMarek Szyprowski __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
11914ce63fcdSMarek Szyprowski {
11924ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
11934ce63fcdSMarek Szyprowski 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
11944ce63fcdSMarek Szyprowski 	dma_addr_t dma_addr, iova;
11954ce63fcdSMarek Szyprowski 	int i, ret = DMA_ERROR_CODE;
11964ce63fcdSMarek Szyprowski 
11974ce63fcdSMarek Szyprowski 	dma_addr = __alloc_iova(mapping, size);
11984ce63fcdSMarek Szyprowski 	if (dma_addr == DMA_ERROR_CODE)
11994ce63fcdSMarek Szyprowski 		return dma_addr;
12004ce63fcdSMarek Szyprowski 
12014ce63fcdSMarek Szyprowski 	iova = dma_addr;
12024ce63fcdSMarek Szyprowski 	for (i = 0; i < count; ) {
12034ce63fcdSMarek Szyprowski 		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
12044ce63fcdSMarek Szyprowski 		phys_addr_t phys = page_to_phys(pages[i]);
12054ce63fcdSMarek Szyprowski 		unsigned int len, j;
12064ce63fcdSMarek Szyprowski 
12074ce63fcdSMarek Szyprowski 		for (j = i + 1; j < count; j++, next_pfn++)
12084ce63fcdSMarek Szyprowski 			if (page_to_pfn(pages[j]) != next_pfn)
12094ce63fcdSMarek Szyprowski 				break;
12104ce63fcdSMarek Szyprowski 
12114ce63fcdSMarek Szyprowski 		len = (j - i) << PAGE_SHIFT;
12124ce63fcdSMarek Szyprowski 		ret = iommu_map(mapping->domain, iova, phys, len, 0);
12134ce63fcdSMarek Szyprowski 		if (ret < 0)
12144ce63fcdSMarek Szyprowski 			goto fail;
12154ce63fcdSMarek Szyprowski 		iova += len;
12164ce63fcdSMarek Szyprowski 		i = j;
12174ce63fcdSMarek Szyprowski 	}
12184ce63fcdSMarek Szyprowski 	return dma_addr;
12194ce63fcdSMarek Szyprowski fail:
12204ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
12214ce63fcdSMarek Szyprowski 	__free_iova(mapping, dma_addr, size);
12224ce63fcdSMarek Szyprowski 	return DMA_ERROR_CODE;
12234ce63fcdSMarek Szyprowski }
12244ce63fcdSMarek Szyprowski 
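/*
 * Illustrative note: the loop above coalesces runs of physically
 * contiguous pages so that each run needs only one iommu_map() call.
 * If, say, pages[0..3] are contiguous but pages[4] is not, the first
 * call maps 16 KiB (with 4 KiB pages) and a new run starts at
 * pages[4].
 */
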
12254ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
12264ce63fcdSMarek Szyprowski {
12274ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
12284ce63fcdSMarek Szyprowski 
12294ce63fcdSMarek Szyprowski 	/*
12304ce63fcdSMarek Szyprowski 	 * add optional in-page offset from iova to size and align
12314ce63fcdSMarek Szyprowski 	 * result to page size
12324ce63fcdSMarek Szyprowski 	 */
12334ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
12344ce63fcdSMarek Szyprowski 	iova &= PAGE_MASK;
12354ce63fcdSMarek Szyprowski 
12364ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova, size);
12374ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova, size);
12384ce63fcdSMarek Szyprowski 	return 0;
12394ce63fcdSMarek Szyprowski }
12404ce63fcdSMarek Szyprowski 
1241665bad7bSHiroshi Doyu static struct page **__atomic_get_pages(void *addr)
1242665bad7bSHiroshi Doyu {
1243665bad7bSHiroshi Doyu 	struct dma_pool *pool = &atomic_pool;
1244665bad7bSHiroshi Doyu 	struct page **pages = pool->pages;
1245665bad7bSHiroshi Doyu 	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
1246665bad7bSHiroshi Doyu 
1247665bad7bSHiroshi Doyu 	return pages + offs;
1248665bad7bSHiroshi Doyu }
1249665bad7bSHiroshi Doyu 
1250955c757eSMarek Szyprowski static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
1251e9da6e99SMarek Szyprowski {
1252e9da6e99SMarek Szyprowski 	struct vm_struct *area;
1253e9da6e99SMarek Szyprowski 
1254665bad7bSHiroshi Doyu 	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1255665bad7bSHiroshi Doyu 		return __atomic_get_pages(cpu_addr);
1256665bad7bSHiroshi Doyu 
1257955c757eSMarek Szyprowski 	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1258955c757eSMarek Szyprowski 		return cpu_addr;
1259955c757eSMarek Szyprowski 
1260e9da6e99SMarek Szyprowski 	area = find_vm_area(cpu_addr);
1261e9da6e99SMarek Szyprowski 	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1262e9da6e99SMarek Szyprowski 		return area->pages;
1263e9da6e99SMarek Szyprowski 	return NULL;
1264e9da6e99SMarek Szyprowski }
1265e9da6e99SMarek Szyprowski 
1266479ed93aSHiroshi Doyu static void *__iommu_alloc_atomic(struct device *dev, size_t size,
1267479ed93aSHiroshi Doyu 				  dma_addr_t *handle)
1268479ed93aSHiroshi Doyu {
1269479ed93aSHiroshi Doyu 	struct page *page;
1270479ed93aSHiroshi Doyu 	void *addr;
1271479ed93aSHiroshi Doyu 
1272479ed93aSHiroshi Doyu 	addr = __alloc_from_pool(size, &page);
1273479ed93aSHiroshi Doyu 	if (!addr)
1274479ed93aSHiroshi Doyu 		return NULL;
1275479ed93aSHiroshi Doyu 
1276479ed93aSHiroshi Doyu 	*handle = __iommu_create_mapping(dev, &page, size);
1277479ed93aSHiroshi Doyu 	if (*handle == DMA_ERROR_CODE)
1278479ed93aSHiroshi Doyu 		goto err_mapping;
1279479ed93aSHiroshi Doyu 
1280479ed93aSHiroshi Doyu 	return addr;
1281479ed93aSHiroshi Doyu 
1282479ed93aSHiroshi Doyu err_mapping:
1283479ed93aSHiroshi Doyu 	__free_from_pool(addr, size);
1284479ed93aSHiroshi Doyu 	return NULL;
1285479ed93aSHiroshi Doyu }
1286479ed93aSHiroshi Doyu 
1287479ed93aSHiroshi Doyu static void __iommu_free_atomic(struct device *dev, struct page **pages,
1288479ed93aSHiroshi Doyu 				dma_addr_t handle, size_t size)
1289479ed93aSHiroshi Doyu {
1290479ed93aSHiroshi Doyu 	__iommu_remove_mapping(dev, handle, size);
1291479ed93aSHiroshi Doyu 	__free_from_pool(page_address(pages[0]), size);
1292479ed93aSHiroshi Doyu }
1293479ed93aSHiroshi Doyu 
12944ce63fcdSMarek Szyprowski static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
12954ce63fcdSMarek Szyprowski 	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
12964ce63fcdSMarek Szyprowski {
12974ce63fcdSMarek Szyprowski 	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
12984ce63fcdSMarek Szyprowski 	struct page **pages;
12994ce63fcdSMarek Szyprowski 	void *addr = NULL;
13004ce63fcdSMarek Szyprowski 
13014ce63fcdSMarek Szyprowski 	*handle = DMA_ERROR_CODE;
13024ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
13034ce63fcdSMarek Szyprowski 
1304479ed93aSHiroshi Doyu 	if (gfp & GFP_ATOMIC)
1305479ed93aSHiroshi Doyu 		return __iommu_alloc_atomic(dev, size, handle);
1306479ed93aSHiroshi Doyu 
1307549a17e4SMarek Szyprowski 	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
13084ce63fcdSMarek Szyprowski 	if (!pages)
13094ce63fcdSMarek Szyprowski 		return NULL;
13104ce63fcdSMarek Szyprowski 
13114ce63fcdSMarek Szyprowski 	*handle = __iommu_create_mapping(dev, pages, size);
13124ce63fcdSMarek Szyprowski 	if (*handle == DMA_ERROR_CODE)
13134ce63fcdSMarek Szyprowski 		goto err_buffer;
13144ce63fcdSMarek Szyprowski 
1315955c757eSMarek Szyprowski 	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1316955c757eSMarek Szyprowski 		return pages;
1317955c757eSMarek Szyprowski 
1318e9da6e99SMarek Szyprowski 	addr = __iommu_alloc_remap(pages, size, gfp, prot,
1319e9da6e99SMarek Szyprowski 				   __builtin_return_address(0));
13204ce63fcdSMarek Szyprowski 	if (!addr)
13214ce63fcdSMarek Szyprowski 		goto err_mapping;
13224ce63fcdSMarek Szyprowski 
13234ce63fcdSMarek Szyprowski 	return addr;
13244ce63fcdSMarek Szyprowski 
13254ce63fcdSMarek Szyprowski err_mapping:
13264ce63fcdSMarek Szyprowski 	__iommu_remove_mapping(dev, *handle, size);
13274ce63fcdSMarek Szyprowski err_buffer:
1328549a17e4SMarek Szyprowski 	__iommu_free_buffer(dev, pages, size, attrs);
13294ce63fcdSMarek Szyprowski 	return NULL;
13304ce63fcdSMarek Szyprowski }
13314ce63fcdSMarek Szyprowski 
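/*
 * Illustrative sketch (not part of this file): once a device has been
 * attached to an IOMMU mapping (see arm_iommu_attach_device() below),
 * ordinary dma_alloc_coherent()/dma_alloc_attrs() calls are routed to
 * arm_iommu_alloc_attrs() above.  The device and sizes are
 * hypothetical.
 */
#if 0	/* example only */
static void example_alloc(struct device *dev)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t iova;
	void *cpu;

	/* Plain coherent allocation: kernel mapping + IO address. */
	cpu = dma_alloc_coherent(dev, SZ_64K, &iova, GFP_KERNEL);

	/*
	 * Device-only buffer: skip the kernel mapping; the returned
	 * cookie is the page array, not a usable CPU address.
	 */
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	cpu = dma_alloc_attrs(dev, SZ_1M, &iova, GFP_KERNEL, &attrs);
}
#endif
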
13324ce63fcdSMarek Szyprowski static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
13334ce63fcdSMarek Szyprowski 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
13344ce63fcdSMarek Szyprowski 		    struct dma_attrs *attrs)
13354ce63fcdSMarek Szyprowski {
13364ce63fcdSMarek Szyprowski 	unsigned long uaddr = vma->vm_start;
13374ce63fcdSMarek Szyprowski 	unsigned long usize = vma->vm_end - vma->vm_start;
1338955c757eSMarek Szyprowski 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1339e9da6e99SMarek Szyprowski 
1340e9da6e99SMarek Szyprowski 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1341e9da6e99SMarek Szyprowski 
1342e9da6e99SMarek Szyprowski 	if (!pages)
1343e9da6e99SMarek Szyprowski 		return -ENXIO;
13444ce63fcdSMarek Szyprowski 
13454ce63fcdSMarek Szyprowski 	do {
1346e9da6e99SMarek Szyprowski 		int ret = vm_insert_page(vma, uaddr, *pages++);
13474ce63fcdSMarek Szyprowski 		if (ret) {
1348e9da6e99SMarek Szyprowski 			pr_err("Remapping memory failed: %d\n", ret);
13494ce63fcdSMarek Szyprowski 			return ret;
13504ce63fcdSMarek Szyprowski 		}
13514ce63fcdSMarek Szyprowski 		uaddr += PAGE_SIZE;
13524ce63fcdSMarek Szyprowski 		usize -= PAGE_SIZE;
13534ce63fcdSMarek Szyprowski 	} while (usize > 0);
1354e9da6e99SMarek Szyprowski 
13554ce63fcdSMarek Szyprowski 	return 0;
13564ce63fcdSMarek Szyprowski }
13574ce63fcdSMarek Szyprowski 
13584ce63fcdSMarek Szyprowski /*
13594ce63fcdSMarek Szyprowski  * Free a buffer as defined by the above mapping.
13604ce63fcdSMarek Szyprowski  * Must not be called with IRQs disabled.
13614ce63fcdSMarek Szyprowski  */
13624ce63fcdSMarek Szyprowski void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
13634ce63fcdSMarek Szyprowski 			  dma_addr_t handle, struct dma_attrs *attrs)
13644ce63fcdSMarek Szyprowski {
1365955c757eSMarek Szyprowski 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
13664ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
13674ce63fcdSMarek Szyprowski 
1368e9da6e99SMarek Szyprowski 	if (!pages) {
1369e9da6e99SMarek Szyprowski 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1370e9da6e99SMarek Szyprowski 		return;
1371e9da6e99SMarek Szyprowski 	}
1372e9da6e99SMarek Szyprowski 
1373479ed93aSHiroshi Doyu 	if (__in_atomic_pool(cpu_addr, size)) {
1374479ed93aSHiroshi Doyu 		__iommu_free_atomic(dev, pages, handle, size);
1375479ed93aSHiroshi Doyu 		return;
1376479ed93aSHiroshi Doyu 	}
1377479ed93aSHiroshi Doyu 
1378955c757eSMarek Szyprowski 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
1379e9da6e99SMarek Szyprowski 		unmap_kernel_range((unsigned long)cpu_addr, size);
1380e9da6e99SMarek Szyprowski 		vunmap(cpu_addr);
1381955c757eSMarek Szyprowski 	}
1382e9da6e99SMarek Szyprowski 
13834ce63fcdSMarek Szyprowski 	__iommu_remove_mapping(dev, handle, size);
1384549a17e4SMarek Szyprowski 	__iommu_free_buffer(dev, pages, size, attrs);
13854ce63fcdSMarek Szyprowski }
13864ce63fcdSMarek Szyprowski 
1387dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1388dc2832e1SMarek Szyprowski 				 void *cpu_addr, dma_addr_t dma_addr,
1389dc2832e1SMarek Szyprowski 				 size_t size, struct dma_attrs *attrs)
1390dc2832e1SMarek Szyprowski {
1391dc2832e1SMarek Szyprowski 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1392dc2832e1SMarek Szyprowski 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1393dc2832e1SMarek Szyprowski 
1394dc2832e1SMarek Szyprowski 	if (!pages)
1395dc2832e1SMarek Szyprowski 		return -ENXIO;
1396dc2832e1SMarek Szyprowski 
1397dc2832e1SMarek Szyprowski 	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1398dc2832e1SMarek Szyprowski 					 GFP_KERNEL);
13994ce63fcdSMarek Szyprowski }
14004ce63fcdSMarek Szyprowski 
14014ce63fcdSMarek Szyprowski /*
14024ce63fcdSMarek Szyprowski  * Map a part of the scatter-gather list into contiguous io address space
14034ce63fcdSMarek Szyprowski  */
14044ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
14054ce63fcdSMarek Szyprowski 			  size_t size, dma_addr_t *handle,
14060fa478dfSRob Herring 			  enum dma_data_direction dir, struct dma_attrs *attrs,
14070fa478dfSRob Herring 			  bool is_coherent)
14084ce63fcdSMarek Szyprowski {
14094ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
14104ce63fcdSMarek Szyprowski 	dma_addr_t iova, iova_base;
14114ce63fcdSMarek Szyprowski 	int ret = 0;
14124ce63fcdSMarek Szyprowski 	unsigned int count;
14134ce63fcdSMarek Szyprowski 	struct scatterlist *s;
14144ce63fcdSMarek Szyprowski 
14154ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
14164ce63fcdSMarek Szyprowski 	*handle = DMA_ERROR_CODE;
14174ce63fcdSMarek Szyprowski 
14184ce63fcdSMarek Szyprowski 	iova_base = iova = __alloc_iova(mapping, size);
14194ce63fcdSMarek Szyprowski 	if (iova == DMA_ERROR_CODE)
14204ce63fcdSMarek Szyprowski 		return -ENOMEM;
14214ce63fcdSMarek Szyprowski 
14224ce63fcdSMarek Szyprowski 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
14234ce63fcdSMarek Szyprowski 		phys_addr_t phys = page_to_phys(sg_page(s));
14244ce63fcdSMarek Szyprowski 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
14254ce63fcdSMarek Szyprowski 
14260fa478dfSRob Herring 		if (!is_coherent &&
142797ef952aSMarek Szyprowski 			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
14284ce63fcdSMarek Szyprowski 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
14294ce63fcdSMarek Szyprowski 
14304ce63fcdSMarek Szyprowski 		ret = iommu_map(mapping->domain, iova, phys, len, 0);
14314ce63fcdSMarek Szyprowski 		if (ret < 0)
14324ce63fcdSMarek Szyprowski 			goto fail;
14334ce63fcdSMarek Szyprowski 		count += len >> PAGE_SHIFT;
14344ce63fcdSMarek Szyprowski 		iova += len;
14354ce63fcdSMarek Szyprowski 	}
14364ce63fcdSMarek Szyprowski 	*handle = iova_base;
14374ce63fcdSMarek Szyprowski 
14384ce63fcdSMarek Szyprowski 	return 0;
14394ce63fcdSMarek Szyprowski fail:
14404ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
14414ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova_base, size);
14424ce63fcdSMarek Szyprowski 	return ret;
14434ce63fcdSMarek Szyprowski }
14444ce63fcdSMarek Szyprowski 
14450fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
14460fa478dfSRob Herring 		     enum dma_data_direction dir, struct dma_attrs *attrs,
14470fa478dfSRob Herring 		     bool is_coherent)
14484ce63fcdSMarek Szyprowski {
14494ce63fcdSMarek Szyprowski 	struct scatterlist *s = sg, *dma = sg, *start = sg;
14504ce63fcdSMarek Szyprowski 	int i, count = 0;
14514ce63fcdSMarek Szyprowski 	unsigned int offset = s->offset;
14524ce63fcdSMarek Szyprowski 	unsigned int size = s->offset + s->length;
14534ce63fcdSMarek Szyprowski 	unsigned int max = dma_get_max_seg_size(dev);
14544ce63fcdSMarek Szyprowski 
14554ce63fcdSMarek Szyprowski 	for (i = 1; i < nents; i++) {
14564ce63fcdSMarek Szyprowski 		s = sg_next(s);
14574ce63fcdSMarek Szyprowski 
14584ce63fcdSMarek Szyprowski 		s->dma_address = DMA_ERROR_CODE;
14594ce63fcdSMarek Szyprowski 		s->dma_length = 0;
14604ce63fcdSMarek Szyprowski 
14614ce63fcdSMarek Szyprowski 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
14624ce63fcdSMarek Szyprowski 			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
14630fa478dfSRob Herring 			    dir, attrs, is_coherent) < 0)
14644ce63fcdSMarek Szyprowski 				goto bad_mapping;
14654ce63fcdSMarek Szyprowski 
14664ce63fcdSMarek Szyprowski 			dma->dma_address += offset;
14674ce63fcdSMarek Szyprowski 			dma->dma_length = size - offset;
14684ce63fcdSMarek Szyprowski 
14694ce63fcdSMarek Szyprowski 			size = offset = s->offset;
14704ce63fcdSMarek Szyprowski 			start = s;
14714ce63fcdSMarek Szyprowski 			dma = sg_next(dma);
14724ce63fcdSMarek Szyprowski 			count += 1;
14734ce63fcdSMarek Szyprowski 		}
14744ce63fcdSMarek Szyprowski 		size += s->length;
14754ce63fcdSMarek Szyprowski 	}
14760fa478dfSRob Herring 	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
14770fa478dfSRob Herring 		is_coherent) < 0)
14784ce63fcdSMarek Szyprowski 		goto bad_mapping;
14794ce63fcdSMarek Szyprowski 
14804ce63fcdSMarek Szyprowski 	dma->dma_address += offset;
14814ce63fcdSMarek Szyprowski 	dma->dma_length = size - offset;
14824ce63fcdSMarek Szyprowski 
14834ce63fcdSMarek Szyprowski 	return count+1;
14844ce63fcdSMarek Szyprowski 
14854ce63fcdSMarek Szyprowski bad_mapping:
14864ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, count, i)
14874ce63fcdSMarek Szyprowski 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
14884ce63fcdSMarek Szyprowski 	return 0;
14894ce63fcdSMarek Szyprowski }
14904ce63fcdSMarek Szyprowski 
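/*
 * Illustrative note: __iommu_map_sg() closes the current chunk and
 * starts a new one whenever an element has a non-zero in-page offset,
 * the accumulated size is not page aligned, or adding the element
 * would exceed dma_get_max_seg_size().  Each chunk becomes one
 * IOVA-contiguous region, so callers may see fewer DMA segments than
 * they passed in.
 */
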
14914ce63fcdSMarek Szyprowski /**
14920fa478dfSRob Herring  * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
14930fa478dfSRob Herring  * @dev: valid struct device pointer
14940fa478dfSRob Herring  * @sg: list of buffers
14950fa478dfSRob Herring  * @nents: number of buffers to map
14960fa478dfSRob Herring  * @dir: DMA transfer direction
14970fa478dfSRob Herring  *
14980fa478dfSRob Herring  * Map a set of i/o coherent buffers described by scatterlist in streaming
14990fa478dfSRob Herring  * mode for DMA. The scatter gather list elements are merged together (if
15000fa478dfSRob Herring  * possible) and tagged with the appropriate dma address and length. They are
15010fa478dfSRob Herring  * obtained via sg_dma_{address,length}.
15020fa478dfSRob Herring  */
15030fa478dfSRob Herring int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
15040fa478dfSRob Herring 		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
15050fa478dfSRob Herring {
15060fa478dfSRob Herring 	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
15070fa478dfSRob Herring }
15080fa478dfSRob Herring 
15090fa478dfSRob Herring /**
15100fa478dfSRob Herring  * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
15110fa478dfSRob Herring  * @dev: valid struct device pointer
15120fa478dfSRob Herring  * @sg: list of buffers
15130fa478dfSRob Herring  * @nents: number of buffers to map
15140fa478dfSRob Herring  * @dir: DMA transfer direction
15150fa478dfSRob Herring  *
15160fa478dfSRob Herring  * Map a set of buffers described by scatterlist in streaming mode for DMA.
15170fa478dfSRob Herring  * The scatter gather list elements are merged together (if possible) and
15180fa478dfSRob Herring  * tagged with the appropriate dma address and length. They are obtained via
15190fa478dfSRob Herring  * sg_dma_{address,length}.
15200fa478dfSRob Herring  */
15210fa478dfSRob Herring int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
15220fa478dfSRob Herring 		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
15230fa478dfSRob Herring {
15240fa478dfSRob Herring 	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
15250fa478dfSRob Herring }
15260fa478dfSRob Herring 
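/*
 * Illustrative sketch (not part of this file): with the IOMMU
 * dma_map_ops installed a driver uses the generic scatterlist API as
 * usual; the merging described above happens transparently.  The page
 * array and count are hypothetical.
 */
#if 0	/* example only */
static int example_map_pages(struct device *dev, struct page **pg, int n)
{
	struct scatterlist *sgl, *s;
	int i, nents;

	sgl = kmalloc_array(n, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;

	sg_init_table(sgl, n);
	for (i = 0; i < n; i++)
		sg_set_page(&sgl[i], pg[i], PAGE_SIZE, 0);

	nents = dma_map_sg(dev, sgl, n, DMA_TO_DEVICE);
	if (nents) {
		for_each_sg(sgl, s, nents, i)
			pr_debug("seg %d: iova %llx len %u\n", i,
				 (unsigned long long)sg_dma_address(s),
				 sg_dma_len(s));
		dma_unmap_sg(dev, sgl, n, DMA_TO_DEVICE);
	}

	kfree(sgl);
	return nents ? 0 : -ENOMEM;
}
#endif
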
15270fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
15280fa478dfSRob Herring 		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
15290fa478dfSRob Herring 		bool is_coherent)
15300fa478dfSRob Herring {
15310fa478dfSRob Herring 	struct scatterlist *s;
15320fa478dfSRob Herring 	int i;
15330fa478dfSRob Herring 
15340fa478dfSRob Herring 	for_each_sg(sg, s, nents, i) {
15350fa478dfSRob Herring 		if (sg_dma_len(s))
15360fa478dfSRob Herring 			__iommu_remove_mapping(dev, sg_dma_address(s),
15370fa478dfSRob Herring 					       sg_dma_len(s));
15380fa478dfSRob Herring 		if (!is_coherent &&
15390fa478dfSRob Herring 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
15400fa478dfSRob Herring 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
15410fa478dfSRob Herring 					      s->length, dir);
15420fa478dfSRob Herring 	}
15430fa478dfSRob Herring }
15440fa478dfSRob Herring 
15450fa478dfSRob Herring /**
15460fa478dfSRob Herring  * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
15470fa478dfSRob Herring  * @dev: valid struct device pointer
15480fa478dfSRob Herring  * @sg: list of buffers
15490fa478dfSRob Herring  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
15500fa478dfSRob Herring  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
15510fa478dfSRob Herring  *
15520fa478dfSRob Herring  * Unmap a set of streaming mode DMA translations.  Again, CPU access
15530fa478dfSRob Herring  * rules concerning calls here are the same as for dma_unmap_single().
15540fa478dfSRob Herring  */
15550fa478dfSRob Herring void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
15560fa478dfSRob Herring 		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
15570fa478dfSRob Herring {
15580fa478dfSRob Herring 	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
15590fa478dfSRob Herring }
15600fa478dfSRob Herring 
15610fa478dfSRob Herring /**
15624ce63fcdSMarek Szyprowski  * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
15634ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
15644ce63fcdSMarek Szyprowski  * @sg: list of buffers
15654ce63fcdSMarek Szyprowski  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
15664ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
15674ce63fcdSMarek Szyprowski  *
15684ce63fcdSMarek Szyprowski  * Unmap a set of streaming mode DMA translations.  Again, CPU access
15694ce63fcdSMarek Szyprowski  * rules concerning calls here are the same as for dma_unmap_single().
15704ce63fcdSMarek Szyprowski  */
15714ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
15724ce63fcdSMarek Szyprowski 			enum dma_data_direction dir, struct dma_attrs *attrs)
15734ce63fcdSMarek Szyprowski {
15740fa478dfSRob Herring 	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
15754ce63fcdSMarek Szyprowski }
15764ce63fcdSMarek Szyprowski 
15774ce63fcdSMarek Szyprowski /**
15784ce63fcdSMarek Szyprowski  * arm_iommu_sync_sg_for_cpu
15794ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
15804ce63fcdSMarek Szyprowski  * @sg: list of buffers
15814ce63fcdSMarek Szyprowski  * @nents: number of buffers to map (returned from dma_map_sg)
15824ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
15834ce63fcdSMarek Szyprowski  */
15844ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
15854ce63fcdSMarek Szyprowski 			int nents, enum dma_data_direction dir)
15864ce63fcdSMarek Szyprowski {
15874ce63fcdSMarek Szyprowski 	struct scatterlist *s;
15884ce63fcdSMarek Szyprowski 	int i;
15894ce63fcdSMarek Szyprowski 
15904ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, nents, i)
15914ce63fcdSMarek Szyprowski 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
15924ce63fcdSMarek Szyprowski 
15934ce63fcdSMarek Szyprowski }
15944ce63fcdSMarek Szyprowski 
15954ce63fcdSMarek Szyprowski /**
15964ce63fcdSMarek Szyprowski  * arm_iommu_sync_sg_for_device
15974ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
15984ce63fcdSMarek Szyprowski  * @sg: list of buffers
15994ce63fcdSMarek Szyprowski  * @nents: number of buffers to map (returned from dma_map_sg)
16004ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
16014ce63fcdSMarek Szyprowski  */
16024ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
16034ce63fcdSMarek Szyprowski 			int nents, enum dma_data_direction dir)
16044ce63fcdSMarek Szyprowski {
16054ce63fcdSMarek Szyprowski 	struct scatterlist *s;
16064ce63fcdSMarek Szyprowski 	int i;
16074ce63fcdSMarek Szyprowski 
16084ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, nents, i)
16094ce63fcdSMarek Szyprowski 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
16104ce63fcdSMarek Szyprowski }
16114ce63fcdSMarek Szyprowski 
16124ce63fcdSMarek Szyprowski 
16134ce63fcdSMarek Szyprowski /**
16140fa478dfSRob Herring  * arm_coherent_iommu_map_page
16150fa478dfSRob Herring  * @dev: valid struct device pointer
16160fa478dfSRob Herring  * @page: page that buffer resides in
16170fa478dfSRob Herring  * @offset: offset into page for start of buffer
16180fa478dfSRob Herring  * @size: size of buffer to map
16190fa478dfSRob Herring  * @dir: DMA transfer direction
16200fa478dfSRob Herring  *
16210fa478dfSRob Herring  * Coherent IOMMU aware version of arm_dma_map_page()
16220fa478dfSRob Herring  */
16230fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
16240fa478dfSRob Herring 	     unsigned long offset, size_t size, enum dma_data_direction dir,
16250fa478dfSRob Herring 	     struct dma_attrs *attrs)
16260fa478dfSRob Herring {
16270fa478dfSRob Herring 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
16280fa478dfSRob Herring 	dma_addr_t dma_addr;
16290fa478dfSRob Herring 	int ret, len = PAGE_ALIGN(size + offset);
16300fa478dfSRob Herring 
16310fa478dfSRob Herring 	dma_addr = __alloc_iova(mapping, len);
16320fa478dfSRob Herring 	if (dma_addr == DMA_ERROR_CODE)
16330fa478dfSRob Herring 		return dma_addr;
16340fa478dfSRob Herring 
16350fa478dfSRob Herring 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
16360fa478dfSRob Herring 	if (ret < 0)
16370fa478dfSRob Herring 		goto fail;
16380fa478dfSRob Herring 
16390fa478dfSRob Herring 	return dma_addr + offset;
16400fa478dfSRob Herring fail:
16410fa478dfSRob Herring 	__free_iova(mapping, dma_addr, len);
16420fa478dfSRob Herring 	return DMA_ERROR_CODE;
16430fa478dfSRob Herring }
16440fa478dfSRob Herring 
16450fa478dfSRob Herring /**
16464ce63fcdSMarek Szyprowski  * arm_iommu_map_page
16474ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
16484ce63fcdSMarek Szyprowski  * @page: page that buffer resides in
16494ce63fcdSMarek Szyprowski  * @offset: offset into page for start of buffer
16504ce63fcdSMarek Szyprowski  * @size: size of buffer to map
16514ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction
16524ce63fcdSMarek Szyprowski  *
16534ce63fcdSMarek Szyprowski  * IOMMU aware version of arm_dma_map_page()
16544ce63fcdSMarek Szyprowski  */
16554ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
16564ce63fcdSMarek Szyprowski 	     unsigned long offset, size_t size, enum dma_data_direction dir,
16574ce63fcdSMarek Szyprowski 	     struct dma_attrs *attrs)
16584ce63fcdSMarek Szyprowski {
16590fa478dfSRob Herring 	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
16604ce63fcdSMarek Szyprowski 		__dma_page_cpu_to_dev(page, offset, size, dir);
16614ce63fcdSMarek Szyprowski 
16620fa478dfSRob Herring 	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
16630fa478dfSRob Herring }
16644ce63fcdSMarek Szyprowski 
16650fa478dfSRob Herring /**
16660fa478dfSRob Herring  * arm_coherent_iommu_unmap_page
16670fa478dfSRob Herring  * @dev: valid struct device pointer
16680fa478dfSRob Herring  * @handle: DMA address of buffer
16690fa478dfSRob Herring  * @size: size of buffer (same as passed to dma_map_page)
16700fa478dfSRob Herring  * @dir: DMA transfer direction (same as passed to dma_map_page)
16710fa478dfSRob Herring  *
16720fa478dfSRob Herring  * Coherent IOMMU aware version of arm_dma_unmap_page()
16730fa478dfSRob Herring  */
16740fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
16750fa478dfSRob Herring 		size_t size, enum dma_data_direction dir,
16760fa478dfSRob Herring 		struct dma_attrs *attrs)
16770fa478dfSRob Herring {
16780fa478dfSRob Herring 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
16790fa478dfSRob Herring 	dma_addr_t iova = handle & PAGE_MASK;
16800fa478dfSRob Herring 	int offset = handle & ~PAGE_MASK;
16810fa478dfSRob Herring 	int len = PAGE_ALIGN(size + offset);
16824ce63fcdSMarek Szyprowski 
16830fa478dfSRob Herring 	if (!iova)
16840fa478dfSRob Herring 		return;
16850fa478dfSRob Herring 
16860fa478dfSRob Herring 	iommu_unmap(mapping->domain, iova, len);
16870fa478dfSRob Herring 	__free_iova(mapping, iova, len);
16884ce63fcdSMarek Szyprowski }
16894ce63fcdSMarek Szyprowski 
16904ce63fcdSMarek Szyprowski /**
16914ce63fcdSMarek Szyprowski  * arm_iommu_unmap_page
16924ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
16934ce63fcdSMarek Szyprowski  * @handle: DMA address of buffer
16944ce63fcdSMarek Szyprowski  * @size: size of buffer (same as passed to dma_map_page)
16954ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as passed to dma_map_page)
16964ce63fcdSMarek Szyprowski  *
16974ce63fcdSMarek Szyprowski  * IOMMU aware version of arm_dma_unmap_page()
16984ce63fcdSMarek Szyprowski  */
16994ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
17004ce63fcdSMarek Szyprowski 		size_t size, enum dma_data_direction dir,
17014ce63fcdSMarek Szyprowski 		struct dma_attrs *attrs)
17024ce63fcdSMarek Szyprowski {
17034ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
17044ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
17054ce63fcdSMarek Szyprowski 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
17064ce63fcdSMarek Szyprowski 	int offset = handle & ~PAGE_MASK;
17074ce63fcdSMarek Szyprowski 	int len = PAGE_ALIGN(size + offset);
17084ce63fcdSMarek Szyprowski 
17094ce63fcdSMarek Szyprowski 	if (!iova)
17104ce63fcdSMarek Szyprowski 		return;
17114ce63fcdSMarek Szyprowski 
17120fa478dfSRob Herring 	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
17134ce63fcdSMarek Szyprowski 		__dma_page_dev_to_cpu(page, offset, size, dir);
17144ce63fcdSMarek Szyprowski 
17154ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova, len);
17164ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova, len);
17174ce63fcdSMarek Szyprowski }
17184ce63fcdSMarek Szyprowski 
17194ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev,
17204ce63fcdSMarek Szyprowski 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
17214ce63fcdSMarek Szyprowski {
17224ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
17234ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
17244ce63fcdSMarek Szyprowski 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
17254ce63fcdSMarek Szyprowski 	unsigned int offset = handle & ~PAGE_MASK;
17264ce63fcdSMarek Szyprowski 
17274ce63fcdSMarek Szyprowski 	if (!iova)
17284ce63fcdSMarek Szyprowski 		return;
17294ce63fcdSMarek Szyprowski 
17304ce63fcdSMarek Szyprowski 	__dma_page_dev_to_cpu(page, offset, size, dir);
17314ce63fcdSMarek Szyprowski }
17324ce63fcdSMarek Szyprowski 
17334ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev,
17344ce63fcdSMarek Szyprowski 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
17354ce63fcdSMarek Szyprowski {
17364ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
17374ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
17384ce63fcdSMarek Szyprowski 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
17394ce63fcdSMarek Szyprowski 	unsigned int offset = handle & ~PAGE_MASK;
17404ce63fcdSMarek Szyprowski 
17414ce63fcdSMarek Szyprowski 	if (!iova)
17424ce63fcdSMarek Szyprowski 		return;
17434ce63fcdSMarek Szyprowski 
17444ce63fcdSMarek Szyprowski 	__dma_page_cpu_to_dev(page, offset, size, dir);
17454ce63fcdSMarek Szyprowski }
17464ce63fcdSMarek Szyprowski 
17474ce63fcdSMarek Szyprowski struct dma_map_ops iommu_ops = {
17484ce63fcdSMarek Szyprowski 	.alloc		= arm_iommu_alloc_attrs,
17494ce63fcdSMarek Szyprowski 	.free		= arm_iommu_free_attrs,
17504ce63fcdSMarek Szyprowski 	.mmap		= arm_iommu_mmap_attrs,
1751dc2832e1SMarek Szyprowski 	.get_sgtable	= arm_iommu_get_sgtable,
17524ce63fcdSMarek Szyprowski 
17534ce63fcdSMarek Szyprowski 	.map_page		= arm_iommu_map_page,
17544ce63fcdSMarek Szyprowski 	.unmap_page		= arm_iommu_unmap_page,
17554ce63fcdSMarek Szyprowski 	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
17564ce63fcdSMarek Szyprowski 	.sync_single_for_device	= arm_iommu_sync_single_for_device,
17574ce63fcdSMarek Szyprowski 
17584ce63fcdSMarek Szyprowski 	.map_sg			= arm_iommu_map_sg,
17594ce63fcdSMarek Szyprowski 	.unmap_sg		= arm_iommu_unmap_sg,
17604ce63fcdSMarek Szyprowski 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
17614ce63fcdSMarek Szyprowski 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
1762d09e1333SHiroshi Doyu 
1763d09e1333SHiroshi Doyu 	.set_dma_mask		= arm_dma_set_mask,
17644ce63fcdSMarek Szyprowski };
17654ce63fcdSMarek Szyprowski 
17660fa478dfSRob Herring struct dma_map_ops iommu_coherent_ops = {
17670fa478dfSRob Herring 	.alloc		= arm_iommu_alloc_attrs,
17680fa478dfSRob Herring 	.free		= arm_iommu_free_attrs,
17690fa478dfSRob Herring 	.mmap		= arm_iommu_mmap_attrs,
17700fa478dfSRob Herring 	.get_sgtable	= arm_iommu_get_sgtable,
17710fa478dfSRob Herring 
17720fa478dfSRob Herring 	.map_page	= arm_coherent_iommu_map_page,
17730fa478dfSRob Herring 	.unmap_page	= arm_coherent_iommu_unmap_page,
17740fa478dfSRob Herring 
17750fa478dfSRob Herring 	.map_sg		= arm_coherent_iommu_map_sg,
17760fa478dfSRob Herring 	.unmap_sg	= arm_coherent_iommu_unmap_sg,
1777d09e1333SHiroshi Doyu 
1778d09e1333SHiroshi Doyu 	.set_dma_mask	= arm_dma_set_mask,
17790fa478dfSRob Herring };
17800fa478dfSRob Herring 
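/*
 * Illustrative note: iommu_ops is intended for masters that are not
 * cache-coherent with the CPU and therefore need the clean/invalidate
 * helpers around transfers, while iommu_coherent_ops omits that cache
 * maintenance.  Which table a given device ends up with is left to the
 * code that wires the device up to its IOMMU.
 */
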
17814ce63fcdSMarek Szyprowski /**
17824ce63fcdSMarek Szyprowski  * arm_iommu_create_mapping
17834ce63fcdSMarek Szyprowski  * @bus: pointer to the bus holding the client device (for IOMMU calls)
17844ce63fcdSMarek Szyprowski  * @base: start address of the valid IO address space
17854ce63fcdSMarek Szyprowski  * @size: size of the valid IO address space
17864ce63fcdSMarek Szyprowski  * @order: granularity of the IO address allocations (in 2^order pages)
17874ce63fcdSMarek Szyprowski  *
17884ce63fcdSMarek Szyprowski  * Creates a mapping structure which holds information about used/unused
17894ce63fcdSMarek Szyprowski  * IO address ranges, which is required to perform memory allocation and
17904ce63fcdSMarek Szyprowski  * mapping with IOMMU aware functions.
17914ce63fcdSMarek Szyprowski  *
17924ce63fcdSMarek Szyprowski  * The client device needs to be attached to the mapping with the
17934ce63fcdSMarek Szyprowski  * arm_iommu_attach_device() function.
17944ce63fcdSMarek Szyprowski  */
17954ce63fcdSMarek Szyprowski struct dma_iommu_mapping *
17964ce63fcdSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
17974ce63fcdSMarek Szyprowski 			 int order)
17984ce63fcdSMarek Szyprowski {
17994ce63fcdSMarek Szyprowski 	unsigned int count = size >> (PAGE_SHIFT + order);
18004ce63fcdSMarek Szyprowski 	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
18014ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping;
18024ce63fcdSMarek Szyprowski 	int err = -ENOMEM;
18034ce63fcdSMarek Szyprowski 
18044ce63fcdSMarek Szyprowski 	if (!count)
18054ce63fcdSMarek Szyprowski 		return ERR_PTR(-EINVAL);
18064ce63fcdSMarek Szyprowski 
18074ce63fcdSMarek Szyprowski 	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
18084ce63fcdSMarek Szyprowski 	if (!mapping)
18094ce63fcdSMarek Szyprowski 		goto err;
18104ce63fcdSMarek Szyprowski 
18114ce63fcdSMarek Szyprowski 	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
18124ce63fcdSMarek Szyprowski 	if (!mapping->bitmap)
18134ce63fcdSMarek Szyprowski 		goto err2;
18144ce63fcdSMarek Szyprowski 
18154ce63fcdSMarek Szyprowski 	mapping->base = base;
18164ce63fcdSMarek Szyprowski 	mapping->bits = BITS_PER_BYTE * bitmap_size;
18174ce63fcdSMarek Szyprowski 	mapping->order = order;
18184ce63fcdSMarek Szyprowski 	spin_lock_init(&mapping->lock);
18194ce63fcdSMarek Szyprowski 
18204ce63fcdSMarek Szyprowski 	mapping->domain = iommu_domain_alloc(bus);
18214ce63fcdSMarek Szyprowski 	if (!mapping->domain)
18224ce63fcdSMarek Szyprowski 		goto err3;
18234ce63fcdSMarek Szyprowski 
18244ce63fcdSMarek Szyprowski 	kref_init(&mapping->kref);
18254ce63fcdSMarek Szyprowski 	return mapping;
18264ce63fcdSMarek Szyprowski err3:
18274ce63fcdSMarek Szyprowski 	kfree(mapping->bitmap);
18284ce63fcdSMarek Szyprowski err2:
18294ce63fcdSMarek Szyprowski 	kfree(mapping);
18304ce63fcdSMarek Szyprowski err:
18314ce63fcdSMarek Szyprowski 	return ERR_PTR(err);
18324ce63fcdSMarek Szyprowski }
183318177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
18344ce63fcdSMarek Szyprowski 
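/*
 * Illustrative sketch (not part of this file): typical use from
 * platform or bus glue code.  The base address, size and order below
 * are hypothetical; order 0 manages the IO space in single pages.
 */
#if 0	/* example only */
static int example_setup_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	/* dev now uses iommu_ops for all DMA API calls. */
	return 0;
}
#endif
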
18354ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref)
18364ce63fcdSMarek Szyprowski {
18374ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping =
18384ce63fcdSMarek Szyprowski 		container_of(kref, struct dma_iommu_mapping, kref);
18394ce63fcdSMarek Szyprowski 
18404ce63fcdSMarek Szyprowski 	iommu_domain_free(mapping->domain);
18414ce63fcdSMarek Szyprowski 	kfree(mapping->bitmap);
18424ce63fcdSMarek Szyprowski 	kfree(mapping);
18434ce63fcdSMarek Szyprowski }
18444ce63fcdSMarek Szyprowski 
18454ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
18464ce63fcdSMarek Szyprowski {
18474ce63fcdSMarek Szyprowski 	if (mapping)
18484ce63fcdSMarek Szyprowski 		kref_put(&mapping->kref, release_iommu_mapping);
18494ce63fcdSMarek Szyprowski }
185018177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
18514ce63fcdSMarek Szyprowski 
18524ce63fcdSMarek Szyprowski /**
18534ce63fcdSMarek Szyprowski  * arm_iommu_attach_device
18544ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
18554ce63fcdSMarek Szyprowski  * @mapping: io address space mapping structure (returned from
18564ce63fcdSMarek Szyprowski  *	arm_iommu_create_mapping)
18574ce63fcdSMarek Szyprowski  *
18584ce63fcdSMarek Szyprowski  * Attaches the specified io address space mapping to the provided device.
18594ce63fcdSMarek Szyprowski  * This replaces the dma operations (dma_map_ops pointer) with the
18604ce63fcdSMarek Szyprowski  * IOMMU aware version. More than one client might be attached to
18614ce63fcdSMarek Szyprowski  * the same io address space mapping.
18624ce63fcdSMarek Szyprowski  */
18634ce63fcdSMarek Szyprowski int arm_iommu_attach_device(struct device *dev,
18644ce63fcdSMarek Szyprowski 			    struct dma_iommu_mapping *mapping)
18654ce63fcdSMarek Szyprowski {
18664ce63fcdSMarek Szyprowski 	int err;
18674ce63fcdSMarek Szyprowski 
18684ce63fcdSMarek Szyprowski 	err = iommu_attach_device(mapping->domain, dev);
18694ce63fcdSMarek Szyprowski 	if (err)
18704ce63fcdSMarek Szyprowski 		return err;
18714ce63fcdSMarek Szyprowski 
18724ce63fcdSMarek Szyprowski 	kref_get(&mapping->kref);
18734ce63fcdSMarek Szyprowski 	dev->archdata.mapping = mapping;
18744ce63fcdSMarek Szyprowski 	set_dma_ops(dev, &iommu_ops);
18754ce63fcdSMarek Szyprowski 
187675c59716SHiroshi Doyu 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
18774ce63fcdSMarek Szyprowski 	return 0;
18784ce63fcdSMarek Szyprowski }
187918177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
18804ce63fcdSMarek Szyprowski 
18816fe36758SHiroshi Doyu /**
18826fe36758SHiroshi Doyu  * arm_iommu_detach_device
18836fe36758SHiroshi Doyu  * @dev: valid struct device pointer
18846fe36758SHiroshi Doyu  *
18856fe36758SHiroshi Doyu  * Detaches the provided device from a previously attached mapping.
18866fe36758SHiroshi Doyu  * This clears the dma operations (dma_map_ops pointer).
18876fe36758SHiroshi Doyu  */
18886fe36758SHiroshi Doyu void arm_iommu_detach_device(struct device *dev)
18896fe36758SHiroshi Doyu {
18906fe36758SHiroshi Doyu 	struct dma_iommu_mapping *mapping;
18916fe36758SHiroshi Doyu 
18926fe36758SHiroshi Doyu 	mapping = to_dma_iommu_mapping(dev);
18936fe36758SHiroshi Doyu 	if (!mapping) {
18946fe36758SHiroshi Doyu 		dev_warn(dev, "Not attached\n");
18956fe36758SHiroshi Doyu 		return;
18966fe36758SHiroshi Doyu 	}
18976fe36758SHiroshi Doyu 
18986fe36758SHiroshi Doyu 	iommu_detach_device(mapping->domain, dev);
18996fe36758SHiroshi Doyu 	kref_put(&mapping->kref, release_iommu_mapping);
19006fe36758SHiroshi Doyu 	mapping = NULL;
19016fe36758SHiroshi Doyu 	set_dma_ops(dev, NULL);
19026fe36758SHiroshi Doyu 
19036fe36758SHiroshi Doyu 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
19046fe36758SHiroshi Doyu }
190518177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
19066fe36758SHiroshi Doyu 
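/*
 * Illustrative sketch (not part of this file): tearing down the
 * hypothetical setup shown after arm_iommu_create_mapping() above.
 */
#if 0	/* example only */
static void example_teardown_iommu(struct device *dev,
				   struct dma_iommu_mapping *mapping)
{
	arm_iommu_detach_device(dev);		/* drops the attach reference */
	arm_iommu_release_mapping(mapping);	/* drops the creation reference */
}
#endif
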
19074ce63fcdSMarek Szyprowski #endif
1908