xref: /openbmc/linux/arch/arm/mm/dma-mapping.c (revision a70c3ee3)
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL	    0
#define COHERENT    1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
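
/*
 * Illustrative sketch only (hypothetical driver code, not part of this
 * file): the ownership transitions above are driven through the generic
 * streaming DMA API.  "dev", "buf" and "len" are made-up names.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... device owns the buffer; the hardware may DMA into it ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU may inspect the data; caches were invalidated above ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... hand the buffer back to the device for another transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */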

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
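
/*
 * Illustrative sketch only (hypothetical driver code): arm_dma_map_page()
 * is normally reached through the generic dma_map_page() wrapper, e.g.
 *
 *	dma_addr_t dma = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... program the device with "dma", run the transfer ...
 *	dma_unmap_page(dev, dma, size, DMA_TO_DEVICE);
 */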

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ARM_MAPPING_ERROR;
}

const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.mapping_error		= arm_dma_mapping_error,
	.dma_supported		= arm_dma_supported,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.mapping_error		= arm_dma_mapping_error,
	.dma_supported		= arm_dma_supported,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
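
/*
 * Worked example (an illustration only, assuming PAGE_SHIFT == 12 and no
 * dma_pfn_offset, so dma_to_pfn(dev, addr) is simply addr >> PAGE_SHIFT):
 * DMA_BIT_MASK(32) gives dma_to_pfn(dev, mask) == 0xfffff.  The mask is
 * accepted only if that PFN reaches max_dma_pfn, i.e. the device can
 * address every page the DMA allocators might hand back.  A 24-bit mask
 * (PFN limit 0xfff) on a machine with RAM starting at physical address 0
 * and 1 GiB of memory (max_dma_pfn 0x40000) would be rejected with the
 * "covers a smaller range" warning above.
 */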

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
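
/*
 * Illustrative sketch only (hypothetical driver code): on a device that is
 * not DMA coherent, an allocation made from atomic context cannot block,
 * so __dma_alloc() below routes it to this pool (pool_allocator) rather
 * than to CMA or vmalloc remapping:
 *
 *	void *buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_ATOMIC);
 *
 * The pool size can be tuned with the "coherent_pool=" boot parameter
 * parsed by early_coherent_pool() above.
 */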

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;

	if (!dma_mmu_remap_num)
		return;

	/* call flush_cache_all() since CMA area would be large enough */
	flush_cache_all();
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		/*
		 * All the memory in the CMA region will be on ZONE_MOVABLE.
		 * If that zone is considered highmem, the memory in the CMA
		 * region is also treated as highmem even if its physical
		 * address belongs to lowmem.  In that case, re-mapping
		 * isn't required.
		 */
		if (!is_highmem_idx(ZONE_MOVABLE))
			iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}
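
/*
 * Illustrative sketch only (hypothetical driver code): a caller selects the
 * write-combining variant of the mapping with dma_alloc_attrs(), e.g.
 *
 *	void *buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 *				    DMA_ATTR_WRITE_COMBINE);
 *
 * Without the attribute, the default pgprot_dmacoherent() protection is
 * used for both the kernel remap and any user mmap.
 */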

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		__dma_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = ARM_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}
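
/*
 * Illustrative sketch only (hypothetical driver code): arm_dma_alloc() is
 * reached through the generic dma_alloc_coherent() helper; __dma_alloc()
 * then picks one of the allocators above (CMA, simple, remap or atomic
 * pool) depending on the gfp flags and device coherency:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... use "cpu" from the kernel and hand "dma" to the device ...
 *	dma_free_coherent(dev, SZ_64K, cpu, dma);
 */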

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
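
/*
 * Illustrative sketch only (hypothetical driver code): a driver exposes a
 * coherent buffer to userspace from its own .mmap file operation via the
 * generic wrapper, which lands in arm_dma_mmap() above:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */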

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatter-gather table (sg_table).  This presents a couple of problems:
926916a008bSRussell King  * 1. Not all memory allocated via the coherent DMA APIs is backed by
927916a008bSRussell King  *    a struct page
928916a008bSRussell King  * 2. Passing coherent DMA memory into the streaming APIs is not allowed
929916a008bSRussell King  *    as we will try to flush the memory through a different alias to that
930916a008bSRussell King  *    actually being used (and the flushes are redundant.)
931916a008bSRussell King  */
932dc2832e1SMarek Szyprowski int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
933dc2832e1SMarek Szyprowski 		 void *cpu_addr, dma_addr_t handle, size_t size,
93400085f1eSKrzysztof Kozlowski 		 unsigned long attrs)
935dc2832e1SMarek Szyprowski {
936916a008bSRussell King 	unsigned long pfn = dma_to_pfn(dev, handle);
937916a008bSRussell King 	struct page *page;
938dc2832e1SMarek Szyprowski 	int ret;
939dc2832e1SMarek Szyprowski 
940916a008bSRussell King 	/* If the PFN is not valid, we do not have a struct page */
941916a008bSRussell King 	if (!pfn_valid(pfn))
942916a008bSRussell King 		return -ENXIO;
943916a008bSRussell King 
944916a008bSRussell King 	page = pfn_to_page(pfn);
945916a008bSRussell King 
946dc2832e1SMarek Szyprowski 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
947dc2832e1SMarek Szyprowski 	if (unlikely(ret))
948dc2832e1SMarek Szyprowski 		return ret;
949dc2832e1SMarek Szyprowski 
950dc2832e1SMarek Szyprowski 	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
951dc2832e1SMarek Szyprowski 	return 0;
952dc2832e1SMarek Szyprowski }
953dc2832e1SMarek Szyprowski 
95465af191aSRussell King static void dma_cache_maint_page(struct page *page, unsigned long offset,
955a9c9147eSRussell King 	size_t size, enum dma_data_direction dir,
956a9c9147eSRussell King 	void (*op)(const void *, size_t, int))
95765af191aSRussell King {
95815653371SRussell King 	unsigned long pfn;
95915653371SRussell King 	size_t left = size;
96015653371SRussell King 
96115653371SRussell King 	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
96215653371SRussell King 	offset %= PAGE_SIZE;
96315653371SRussell King 
96465af191aSRussell King 	/*
96565af191aSRussell King 	 * A single sg entry may refer to multiple physically contiguous
96665af191aSRussell King 	 * pages.  But we still need to process highmem pages individually.
96765af191aSRussell King 	 * If highmem is not configured then the bulk of this loop gets
96865af191aSRussell King 	 * optimized out.
96965af191aSRussell King 	 */
97065af191aSRussell King 	do {
97165af191aSRussell King 		size_t len = left;
97293f1d629SRussell King 		void *vaddr;
97393f1d629SRussell King 
97415653371SRussell King 		page = pfn_to_page(pfn);
97515653371SRussell King 
97693f1d629SRussell King 		if (PageHighMem(page)) {
97715653371SRussell King 			if (len + offset > PAGE_SIZE)
97865af191aSRussell King 				len = PAGE_SIZE - offset;
979dd0f67f4SJoonsoo Kim 
980dd0f67f4SJoonsoo Kim 			if (cache_is_vipt_nonaliasing()) {
98139af22a7SNicolas Pitre 				vaddr = kmap_atomic(page);
9827e5a69e8SNicolas Pitre 				op(vaddr + offset, len, dir);
98339af22a7SNicolas Pitre 				kunmap_atomic(vaddr);
984dd0f67f4SJoonsoo Kim 			} else {
985dd0f67f4SJoonsoo Kim 				vaddr = kmap_high_get(page);
986dd0f67f4SJoonsoo Kim 				if (vaddr) {
987dd0f67f4SJoonsoo Kim 					op(vaddr + offset, len, dir);
988dd0f67f4SJoonsoo Kim 					kunmap_high(page);
989dd0f67f4SJoonsoo Kim 				}
99093f1d629SRussell King 			}
99193f1d629SRussell King 		} else {
99293f1d629SRussell King 			vaddr = page_address(page) + offset;
993a9c9147eSRussell King 			op(vaddr, len, dir);
99493f1d629SRussell King 		}
99565af191aSRussell King 		offset = 0;
99615653371SRussell King 		pfn++;
99765af191aSRussell King 		left -= len;
99865af191aSRussell King 	} while (left);
99965af191aSRussell King }
100065af191aSRussell King 
100151fde349SMarek Szyprowski /*
100251fde349SMarek Szyprowski  * Make an area consistent for devices.
100351fde349SMarek Szyprowski  * Note: Drivers should NOT use this function directly, as it will break
100451fde349SMarek Szyprowski  * platforms with CONFIG_DMABOUNCE.
100551fde349SMarek Szyprowski  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
100651fde349SMarek Szyprowski  */
100751fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
100865af191aSRussell King 	size_t size, enum dma_data_direction dir)
100965af191aSRussell King {
10102161c248SSantosh Shilimkar 	phys_addr_t paddr;
101143377453SNicolas Pitre 
1012a9c9147eSRussell King 	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
101343377453SNicolas Pitre 
101465af191aSRussell King 	paddr = page_to_phys(page) + off;
10152ffe2da3SRussell King 	if (dir == DMA_FROM_DEVICE) {
10162ffe2da3SRussell King 		outer_inv_range(paddr, paddr + size);
10172ffe2da3SRussell King 	} else {
10182ffe2da3SRussell King 		outer_clean_range(paddr, paddr + size);
10192ffe2da3SRussell King 	}
10202ffe2da3SRussell King 	/* FIXME: non-speculating: flush on bidirectional mappings? */
102143377453SNicolas Pitre }
10224ea0d737SRussell King 
102351fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
10244ea0d737SRussell King 	size_t size, enum dma_data_direction dir)
10254ea0d737SRussell King {
10262161c248SSantosh Shilimkar 	phys_addr_t paddr = page_to_phys(page) + off;
10272ffe2da3SRussell King 
10282ffe2da3SRussell King 	/* FIXME: non-speculating: not required */
1029deace4a6SRussell King 	/* in any case, don't bother invalidating if DMA to device */
1030deace4a6SRussell King 	if (dir != DMA_TO_DEVICE) {
10312ffe2da3SRussell King 		outer_inv_range(paddr, paddr + size);
10322ffe2da3SRussell King 
1033a9c9147eSRussell King 		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
1034deace4a6SRussell King 	}
1035c0177800SCatalin Marinas 
1036c0177800SCatalin Marinas 	/*
1037b2a234edSMing Lei 	 * Mark the D-cache clean for these pages to avoid extra flushing.
1038c0177800SCatalin Marinas 	 */
1039b2a234edSMing Lei 	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
1040b2a234edSMing Lei 		unsigned long pfn;
1041b2a234edSMing Lei 		size_t left = size;
1042b2a234edSMing Lei 
1043b2a234edSMing Lei 		pfn = page_to_pfn(page) + off / PAGE_SIZE;
1044b2a234edSMing Lei 		off %= PAGE_SIZE;
1045b2a234edSMing Lei 		if (off) {
1046b2a234edSMing Lei 			pfn++;
1047b2a234edSMing Lei 			left -= PAGE_SIZE - off;
1048b2a234edSMing Lei 		}
1049b2a234edSMing Lei 		while (left >= PAGE_SIZE) {
1050b2a234edSMing Lei 			page = pfn_to_page(pfn++);
1051c0177800SCatalin Marinas 			set_bit(PG_dcache_clean, &page->flags);
1052b2a234edSMing Lei 			left -= PAGE_SIZE;
1053b2a234edSMing Lei 		}
1054b2a234edSMing Lei 	}
10554ea0d737SRussell King }
105643377453SNicolas Pitre 
1057afd1a321SRussell King /**
10582a550e73SMarek Szyprowski  * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
1059afd1a321SRussell King  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1060afd1a321SRussell King  * @sg: list of buffers
1061afd1a321SRussell King  * @nents: number of buffers to map
1062afd1a321SRussell King  * @dir: DMA transfer direction
1063afd1a321SRussell King  *
1064afd1a321SRussell King  * Map a set of buffers described by scatterlist in streaming mode for DMA.
1065afd1a321SRussell King  * This is the scatter-gather version of the dma_map_single interface.
1066afd1a321SRussell King  * Here the scatter-gather list elements are each tagged with the
1067afd1a321SRussell King  * appropriate dma address and length.  They are obtained via
1068afd1a321SRussell King  * sg_dma_{address,length}.
1069afd1a321SRussell King  *
1070afd1a321SRussell King  * Device ownership issues as mentioned for dma_map_single are the same
1071afd1a321SRussell King  * here.
1072afd1a321SRussell King  */
10732dc6a016SMarek Szyprowski int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
107400085f1eSKrzysztof Kozlowski 		enum dma_data_direction dir, unsigned long attrs)
1075afd1a321SRussell King {
10765299709dSBart Van Assche 	const struct dma_map_ops *ops = get_dma_ops(dev);
1077afd1a321SRussell King 	struct scatterlist *s;
107801135d92SRussell King 	int i, j;
1079afd1a321SRussell King 
1080afd1a321SRussell King 	for_each_sg(sg, s, nents, i) {
10814ce63fcdSMarek Szyprowski #ifdef CONFIG_NEED_SG_DMA_LENGTH
10824ce63fcdSMarek Szyprowski 		s->dma_length = s->length;
10834ce63fcdSMarek Szyprowski #endif
10842a550e73SMarek Szyprowski 		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
10852a550e73SMarek Szyprowski 						s->length, dir, attrs);
108601135d92SRussell King 		if (dma_mapping_error(dev, s->dma_address))
108701135d92SRussell King 			goto bad_mapping;
1088afd1a321SRussell King 	}
1089afd1a321SRussell King 	return nents;
109001135d92SRussell King 
109101135d92SRussell King  bad_mapping:
109201135d92SRussell King 	for_each_sg(sg, s, i, j)
10932a550e73SMarek Szyprowski 		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
109401135d92SRussell King 	return 0;
1095afd1a321SRussell King }
1096afd1a321SRussell King 
1097afd1a321SRussell King /**
10982a550e73SMarek Szyprowski  * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1099afd1a321SRussell King  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1100afd1a321SRussell King  * @sg: list of buffers
11010adfca6fSLinus Walleij  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1102afd1a321SRussell King  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1103afd1a321SRussell King  *
1104afd1a321SRussell King  * Unmap a set of streaming mode DMA translations.  Again, CPU access
1105afd1a321SRussell King  * rules concerning calls here are the same as for dma_unmap_single().
1106afd1a321SRussell King  */
11072dc6a016SMarek Szyprowski void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
110800085f1eSKrzysztof Kozlowski 		enum dma_data_direction dir, unsigned long attrs)
1109afd1a321SRussell King {
11105299709dSBart Van Assche 	const struct dma_map_ops *ops = get_dma_ops(dev);
111101135d92SRussell King 	struct scatterlist *s;
111201135d92SRussell King 
111301135d92SRussell King 	int i;
111424056f52SRussell King 
111501135d92SRussell King 	for_each_sg(sg, s, nents, i)
11162a550e73SMarek Szyprowski 		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
1117afd1a321SRussell King }
1118afd1a321SRussell King 
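/*
 * Example: a hypothetical driver sketch that reaches arm_dma_map_sg() and
 * arm_dma_unmap_sg() through the generic dma_map_sg()/dma_unmap_sg()
 * interface.  my_queue_descriptor() is an assumed driver helper; everything
 * else is standard scatterlist/DMA API.
 */
#if 0	/* illustrative sketch only, not built as part of this file */
static int example_map_sgtable(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *s;
	int i, mapped;

	mapped = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Only the first 'mapped' entries carry valid DMA addresses. */
	for_each_sg(sgt->sgl, s, mapped, i)
		my_queue_descriptor(dev, sg_dma_address(s), sg_dma_len(s));

	/* ... device performs the transfer ... */

	/* Unmap with the original nents, not the value returned above. */
	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	return 0;
}
#endif
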
1119afd1a321SRussell King /**
11202a550e73SMarek Szyprowski  * arm_dma_sync_sg_for_cpu
1121afd1a321SRussell King  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1122afd1a321SRussell King  * @sg: list of buffers
1123afd1a321SRussell King  * @nents: number of buffers to map (returned from dma_map_sg)
1124afd1a321SRussell King  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1125afd1a321SRussell King  */
11262dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1127afd1a321SRussell King 			int nents, enum dma_data_direction dir)
1128afd1a321SRussell King {
11295299709dSBart Van Assche 	const struct dma_map_ops *ops = get_dma_ops(dev);
1130afd1a321SRussell King 	struct scatterlist *s;
1131afd1a321SRussell King 	int i;
1132afd1a321SRussell King 
11332a550e73SMarek Szyprowski 	for_each_sg(sg, s, nents, i)
11342a550e73SMarek Szyprowski 		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
11352a550e73SMarek Szyprowski 					 dir);
1136afd1a321SRussell King }
113724056f52SRussell King 
1138afd1a321SRussell King /**
11392a550e73SMarek Szyprowski  * arm_dma_sync_sg_for_device
1140afd1a321SRussell King  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1141afd1a321SRussell King  * @sg: list of buffers
1142afd1a321SRussell King  * @nents: number of buffers to map (returned from dma_map_sg)
1143afd1a321SRussell King  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1144afd1a321SRussell King  */
11452dc6a016SMarek Szyprowski void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1146afd1a321SRussell King 			int nents, enum dma_data_direction dir)
1147afd1a321SRussell King {
11485299709dSBart Van Assche 	const struct dma_map_ops *ops = get_dma_ops(dev);
1149afd1a321SRussell King 	struct scatterlist *s;
1150afd1a321SRussell King 	int i;
1151afd1a321SRussell King 
11522a550e73SMarek Szyprowski 	for_each_sg(sg, s, nents, i)
11532a550e73SMarek Szyprowski 		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
11542a550e73SMarek Szyprowski 					    dir);
1155afd1a321SRussell King }
115624056f52SRussell King 
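/*
 * Example: a hypothetical sketch of the sync calls that end up in the two
 * functions above when the CPU needs to look at a long-lived streaming
 * mapping between transfers.  my_parse_rx_data() is an assumed helper.
 */
#if 0	/* illustrative sketch only, not built as part of this file */
static void example_peek_then_reuse(struct device *dev, struct sg_table *sgt)
{
	/* Give the buffers back to the CPU before reading them ... */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
	my_parse_rx_data(sgt);			/* assumed driver helper */
	/* ... then hand them to the device again for the next transfer. */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
#endif
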
1157022ae537SRussell King /*
1158022ae537SRussell King  * Return whether the given device DMA address mask can be supported
1159022ae537SRussell King  * properly.  For example, if your device can only drive the low 24 bits
1160022ae537SRussell King  * during bus mastering, then you would pass 0x00ffffff as the mask
1161022ae537SRussell King  * to this function.
1162022ae537SRussell King  */
1163418a7a7eSChristoph Hellwig int arm_dma_supported(struct device *dev, u64 mask)
1164022ae537SRussell King {
11659f28cde0SRussell King 	return __dma_supported(dev, mask, false);
1166022ae537SRussell King }
1167022ae537SRussell King 
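/*
 * Example: how the 24-bit device mentioned in the comment above would
 * declare its addressing capability at probe time; the mask check lands in
 * arm_dma_supported() via the device's dma_map_ops.  Hypothetical sketch.
 */
#if 0	/* illustrative sketch only, not built as part of this file */
static int example_probe(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24));
}
#endif
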
116824056f52SRussell King #define PREALLOC_DMA_DEBUG_ENTRIES	4096
116924056f52SRussell King 
117024056f52SRussell King static int __init dma_debug_do_init(void)
117124056f52SRussell King {
117224056f52SRussell King 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
117324056f52SRussell King 	return 0;
117424056f52SRussell King }
1175256ff1cfSMarek Szyprowski core_initcall(dma_debug_do_init);
11764ce63fcdSMarek Szyprowski 
11774ce63fcdSMarek Szyprowski #ifdef CONFIG_ARM_DMA_USE_IOMMU
11784ce63fcdSMarek Szyprowski 
11797d2822dfSSricharan R static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
11807d2822dfSSricharan R {
11817d2822dfSSricharan R 	int prot = 0;
11827d2822dfSSricharan R 
11837d2822dfSSricharan R 	if (attrs & DMA_ATTR_PRIVILEGED)
11847d2822dfSSricharan R 		prot |= IOMMU_PRIV;
11857d2822dfSSricharan R 
11867d2822dfSSricharan R 	switch (dir) {
11877d2822dfSSricharan R 	case DMA_BIDIRECTIONAL:
11887d2822dfSSricharan R 		return prot | IOMMU_READ | IOMMU_WRITE;
11897d2822dfSSricharan R 	case DMA_TO_DEVICE:
11907d2822dfSSricharan R 		return prot | IOMMU_READ;
11917d2822dfSSricharan R 	case DMA_FROM_DEVICE:
11927d2822dfSSricharan R 		return prot | IOMMU_WRITE;
11937d2822dfSSricharan R 	default:
11947d2822dfSSricharan R 		return prot;
11957d2822dfSSricharan R 	}
11967d2822dfSSricharan R }
11977d2822dfSSricharan R 
11984ce63fcdSMarek Szyprowski /* IOMMU */
11994ce63fcdSMarek Szyprowski 
12004d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
12014d852ef8SAndreas Herrmann 
12024ce63fcdSMarek Szyprowski static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
12034ce63fcdSMarek Szyprowski 				      size_t size)
12044ce63fcdSMarek Szyprowski {
12054ce63fcdSMarek Szyprowski 	unsigned int order = get_order(size);
12064ce63fcdSMarek Szyprowski 	unsigned int align = 0;
12074ce63fcdSMarek Szyprowski 	unsigned int count, start;
1208006f841dSRitesh Harjani 	size_t mapping_size = mapping->bits << PAGE_SHIFT;
12094ce63fcdSMarek Szyprowski 	unsigned long flags;
12104d852ef8SAndreas Herrmann 	dma_addr_t iova;
12114d852ef8SAndreas Herrmann 	int i;
12124ce63fcdSMarek Szyprowski 
121360460abfSSeung-Woo Kim 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
121460460abfSSeung-Woo Kim 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
121560460abfSSeung-Woo Kim 
121668efd7d2SMarek Szyprowski 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
121768efd7d2SMarek Szyprowski 	align = (1 << order) - 1;
12184ce63fcdSMarek Szyprowski 
12194ce63fcdSMarek Szyprowski 	spin_lock_irqsave(&mapping->lock, flags);
12204d852ef8SAndreas Herrmann 	for (i = 0; i < mapping->nr_bitmaps; i++) {
12214d852ef8SAndreas Herrmann 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
12224d852ef8SAndreas Herrmann 				mapping->bits, 0, count, align);
12234d852ef8SAndreas Herrmann 
12244d852ef8SAndreas Herrmann 		if (start > mapping->bits)
12254d852ef8SAndreas Herrmann 			continue;
12264d852ef8SAndreas Herrmann 
12274d852ef8SAndreas Herrmann 		bitmap_set(mapping->bitmaps[i], start, count);
12284d852ef8SAndreas Herrmann 		break;
12294d852ef8SAndreas Herrmann 	}
12304d852ef8SAndreas Herrmann 
12314d852ef8SAndreas Herrmann 	/*
12324d852ef8SAndreas Herrmann 	 * No unused range found. Try to extend the existing mapping
12334d852ef8SAndreas Herrmann 	 * and perform a second attempt to reserve an IO virtual
12344d852ef8SAndreas Herrmann 	 * address range of size bytes.
12354d852ef8SAndreas Herrmann 	 */
12364d852ef8SAndreas Herrmann 	if (i == mapping->nr_bitmaps) {
12374d852ef8SAndreas Herrmann 		if (extend_iommu_mapping(mapping)) {
12384d852ef8SAndreas Herrmann 			spin_unlock_irqrestore(&mapping->lock, flags);
12399eef8b8cSChristoph Hellwig 			return ARM_MAPPING_ERROR;
12404d852ef8SAndreas Herrmann 		}
12414d852ef8SAndreas Herrmann 
12424d852ef8SAndreas Herrmann 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
12434d852ef8SAndreas Herrmann 				mapping->bits, 0, count, align);
12444d852ef8SAndreas Herrmann 
12454ce63fcdSMarek Szyprowski 		if (start > mapping->bits) {
12464ce63fcdSMarek Szyprowski 			spin_unlock_irqrestore(&mapping->lock, flags);
12479eef8b8cSChristoph Hellwig 			return ARM_MAPPING_ERROR;
12484ce63fcdSMarek Szyprowski 		}
12494ce63fcdSMarek Szyprowski 
12504d852ef8SAndreas Herrmann 		bitmap_set(mapping->bitmaps[i], start, count);
12514d852ef8SAndreas Herrmann 	}
12524ce63fcdSMarek Szyprowski 	spin_unlock_irqrestore(&mapping->lock, flags);
12534ce63fcdSMarek Szyprowski 
1254006f841dSRitesh Harjani 	iova = mapping->base + (mapping_size * i);
125568efd7d2SMarek Szyprowski 	iova += start << PAGE_SHIFT;
12564d852ef8SAndreas Herrmann 
12574d852ef8SAndreas Herrmann 	return iova;
12584ce63fcdSMarek Szyprowski }
12594ce63fcdSMarek Szyprowski 
12604ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping,
12614ce63fcdSMarek Szyprowski 			       dma_addr_t addr, size_t size)
12624ce63fcdSMarek Szyprowski {
12634d852ef8SAndreas Herrmann 	unsigned int start, count;
1264006f841dSRitesh Harjani 	size_t mapping_size = mapping->bits << PAGE_SHIFT;
12654ce63fcdSMarek Szyprowski 	unsigned long flags;
12664d852ef8SAndreas Herrmann 	dma_addr_t bitmap_base;
12674d852ef8SAndreas Herrmann 	u32 bitmap_index;
12684d852ef8SAndreas Herrmann 
12694d852ef8SAndreas Herrmann 	if (!size)
12704d852ef8SAndreas Herrmann 		return;
12714d852ef8SAndreas Herrmann 
1272006f841dSRitesh Harjani 	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
12734d852ef8SAndreas Herrmann 	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
12744d852ef8SAndreas Herrmann 
1275006f841dSRitesh Harjani 	bitmap_base = mapping->base + mapping_size * bitmap_index;
12764d852ef8SAndreas Herrmann 
127768efd7d2SMarek Szyprowski 	start = (addr - bitmap_base) >>	PAGE_SHIFT;
12784d852ef8SAndreas Herrmann 
1279006f841dSRitesh Harjani 	if (addr + size > bitmap_base + mapping_size) {
12804d852ef8SAndreas Herrmann 		/*
12814d852ef8SAndreas Herrmann 		 * The address range to be freed reaches into the iova
12824d852ef8SAndreas Herrmann 		 * range of the next bitmap. This should not happen as
12834d852ef8SAndreas Herrmann 		 * we don't allow this in __alloc_iova (at the
12844d852ef8SAndreas Herrmann 		 * moment).
12854d852ef8SAndreas Herrmann 		 */
12864d852ef8SAndreas Herrmann 		BUG();
12874d852ef8SAndreas Herrmann 	} else
128868efd7d2SMarek Szyprowski 		count = size >> PAGE_SHIFT;
12894ce63fcdSMarek Szyprowski 
12904ce63fcdSMarek Szyprowski 	spin_lock_irqsave(&mapping->lock, flags);
12914d852ef8SAndreas Herrmann 	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
12924ce63fcdSMarek Szyprowski 	spin_unlock_irqrestore(&mapping->lock, flags);
12934ce63fcdSMarek Szyprowski }
12944ce63fcdSMarek Szyprowski 
129533298ef6SDoug Anderson /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
129633298ef6SDoug Anderson static const int iommu_order_array[] = { 9, 8, 4, 0 };
129733298ef6SDoug Anderson 
1298549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
129900085f1eSKrzysztof Kozlowski 					  gfp_t gfp, unsigned long attrs,
1300f1270896SGregory CLEMENT 					  int coherent_flag)
13014ce63fcdSMarek Szyprowski {
13024ce63fcdSMarek Szyprowski 	struct page **pages;
13034ce63fcdSMarek Szyprowski 	int count = size >> PAGE_SHIFT;
13044ce63fcdSMarek Szyprowski 	int array_size = count * sizeof(struct page *);
13054ce63fcdSMarek Szyprowski 	int i = 0;
130633298ef6SDoug Anderson 	int order_idx = 0;
13074ce63fcdSMarek Szyprowski 
13084ce63fcdSMarek Szyprowski 	if (array_size <= PAGE_SIZE)
130923be7fdaSAlexandre Courbot 		pages = kzalloc(array_size, GFP_KERNEL);
13104ce63fcdSMarek Szyprowski 	else
13114ce63fcdSMarek Szyprowski 		pages = vzalloc(array_size);
13124ce63fcdSMarek Szyprowski 	if (!pages)
13134ce63fcdSMarek Szyprowski 		return NULL;
13144ce63fcdSMarek Szyprowski 
131500085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
1316549a17e4SMarek Szyprowski 	{
1317549a17e4SMarek Szyprowski 		unsigned long order = get_order(size);
1318549a17e4SMarek Szyprowski 		struct page *page;
1319549a17e4SMarek Szyprowski 
1320712c604dSLucas Stach 		page = dma_alloc_from_contiguous(dev, count, order, gfp);
1321549a17e4SMarek Szyprowski 		if (!page)
1322549a17e4SMarek Szyprowski 			goto error;
1323549a17e4SMarek Szyprowski 
1324f1270896SGregory CLEMENT 		__dma_clear_buffer(page, size, coherent_flag);
1325549a17e4SMarek Szyprowski 
1326549a17e4SMarek Szyprowski 		for (i = 0; i < count; i++)
1327549a17e4SMarek Szyprowski 			pages[i] = page + i;
1328549a17e4SMarek Szyprowski 
1329549a17e4SMarek Szyprowski 		return pages;
1330549a17e4SMarek Szyprowski 	}
1331549a17e4SMarek Szyprowski 
133214d3ae2eSDoug Anderson 	/* Go straight to 4K chunks if caller says it's OK. */
133300085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
133414d3ae2eSDoug Anderson 		order_idx = ARRAY_SIZE(iommu_order_array) - 1;
133514d3ae2eSDoug Anderson 
1336f8669befSMarek Szyprowski 	/*
1337f8669befSMarek Szyprowski 	 * IOMMU can map any pages, so himem can also be used here
1338f8669befSMarek Szyprowski 	 * IOMMU can map any pages, so highmem can also be used here
1339f8669befSMarek Szyprowski 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
1340f8669befSMarek Szyprowski 
13414ce63fcdSMarek Szyprowski 	while (count) {
134249f28aa6STomasz Figa 		int j, order;
13434ce63fcdSMarek Szyprowski 
134433298ef6SDoug Anderson 		order = iommu_order_array[order_idx];
134533298ef6SDoug Anderson 
134633298ef6SDoug Anderson 		/* Drop down when we get small */
134733298ef6SDoug Anderson 		if (__fls(count) < order) {
134833298ef6SDoug Anderson 			order_idx++;
134933298ef6SDoug Anderson 			continue;
135049f28aa6STomasz Figa 		}
135149f28aa6STomasz Figa 
135233298ef6SDoug Anderson 		if (order) {
135333298ef6SDoug Anderson 			/* See if it's easy to allocate a high-order chunk */
135433298ef6SDoug Anderson 			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
135533298ef6SDoug Anderson 
135633298ef6SDoug Anderson 			/* Go down a notch at first sign of pressure */
135749f28aa6STomasz Figa 			if (!pages[i]) {
135833298ef6SDoug Anderson 				order_idx++;
135933298ef6SDoug Anderson 				continue;
136033298ef6SDoug Anderson 			}
136133298ef6SDoug Anderson 		} else {
136249f28aa6STomasz Figa 			pages[i] = alloc_pages(gfp, 0);
13634ce63fcdSMarek Szyprowski 			if (!pages[i])
13644ce63fcdSMarek Szyprowski 				goto error;
136549f28aa6STomasz Figa 		}
13664ce63fcdSMarek Szyprowski 
13675a796eebSHiroshi Doyu 		if (order) {
13684ce63fcdSMarek Szyprowski 			split_page(pages[i], order);
13694ce63fcdSMarek Szyprowski 			j = 1 << order;
13704ce63fcdSMarek Szyprowski 			while (--j)
13714ce63fcdSMarek Szyprowski 				pages[i + j] = pages[i] + j;
13725a796eebSHiroshi Doyu 		}
13734ce63fcdSMarek Szyprowski 
1374f1270896SGregory CLEMENT 		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
13754ce63fcdSMarek Szyprowski 		i += 1 << order;
13764ce63fcdSMarek Szyprowski 		count -= 1 << order;
13774ce63fcdSMarek Szyprowski 	}
13784ce63fcdSMarek Szyprowski 
13794ce63fcdSMarek Szyprowski 	return pages;
13804ce63fcdSMarek Szyprowski error:
13819fa8af91SMarek Szyprowski 	while (i--)
13824ce63fcdSMarek Szyprowski 		if (pages[i])
13834ce63fcdSMarek Szyprowski 			__free_pages(pages[i], 0);
13841d5cfdb0STetsuo Handa 	kvfree(pages);
13854ce63fcdSMarek Szyprowski 	return NULL;
13864ce63fcdSMarek Szyprowski }
13874ce63fcdSMarek Szyprowski 
1388549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages,
138900085f1eSKrzysztof Kozlowski 			       size_t size, unsigned long attrs)
13904ce63fcdSMarek Szyprowski {
13914ce63fcdSMarek Szyprowski 	int count = size >> PAGE_SHIFT;
13924ce63fcdSMarek Szyprowski 	int i;
1393549a17e4SMarek Szyprowski 
139400085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
1395549a17e4SMarek Szyprowski 		dma_release_from_contiguous(dev, pages[0], count);
1396549a17e4SMarek Szyprowski 	} else {
13974ce63fcdSMarek Szyprowski 		for (i = 0; i < count; i++)
13984ce63fcdSMarek Szyprowski 			if (pages[i])
13994ce63fcdSMarek Szyprowski 				__free_pages(pages[i], 0);
1400549a17e4SMarek Szyprowski 	}
1401549a17e4SMarek Szyprowski 
14021d5cfdb0STetsuo Handa 	kvfree(pages);
14034ce63fcdSMarek Szyprowski 	return 0;
14044ce63fcdSMarek Szyprowski }
14054ce63fcdSMarek Szyprowski 
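/*
 * Example: a hypothetical caller-side sketch of how the attrs handled above
 * steer __iommu_alloc_buffer(): the default path tries high-order chunks
 * first, while DMA_ATTR_ALLOC_SINGLE_PAGES goes straight to order-0 pages.
 */
#if 0	/* illustrative sketch only, not built as part of this file */
static void *example_iommu_alloc(struct device *dev, size_t size,
				 dma_addr_t *handle)
{
	/* Opportunistic high-order allocation (default path above). */
	void *vaddr = dma_alloc_attrs(dev, size, handle, GFP_KERNEL, 0);

	if (!vaddr)
		/* Retry with order-0 pages only, avoiding compaction stalls. */
		vaddr = dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
					DMA_ATTR_ALLOC_SINGLE_PAGES);
	return vaddr;
}
#endif
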
14064ce63fcdSMarek Szyprowski /*
14074ce63fcdSMarek Szyprowski  * Create a CPU mapping for the specified pages
14084ce63fcdSMarek Szyprowski  */
14094ce63fcdSMarek Szyprowski static void *
1410e9da6e99SMarek Szyprowski __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1411e9da6e99SMarek Szyprowski 		    const void *caller)
14124ce63fcdSMarek Szyprowski {
1413513510ddSLaura Abbott 	return dma_common_pages_remap(pages, size,
1414513510ddSLaura Abbott 			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
14154ce63fcdSMarek Szyprowski }
14164ce63fcdSMarek Szyprowski 
14174ce63fcdSMarek Szyprowski /*
14184ce63fcdSMarek Szyprowski  * Create a mapping in the device IO address space for the specified pages
14194ce63fcdSMarek Szyprowski  */
14204ce63fcdSMarek Szyprowski static dma_addr_t
14217d2822dfSSricharan R __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
14227d2822dfSSricharan R 		       unsigned long attrs)
14234ce63fcdSMarek Szyprowski {
142489cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
14254ce63fcdSMarek Szyprowski 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
14264ce63fcdSMarek Szyprowski 	dma_addr_t dma_addr, iova;
142790cde558SAndre Przywara 	int i;
14284ce63fcdSMarek Szyprowski 
14294ce63fcdSMarek Szyprowski 	dma_addr = __alloc_iova(mapping, size);
14309eef8b8cSChristoph Hellwig 	if (dma_addr == ARM_MAPPING_ERROR)
14314ce63fcdSMarek Szyprowski 		return dma_addr;
14324ce63fcdSMarek Szyprowski 
14334ce63fcdSMarek Szyprowski 	iova = dma_addr;
14344ce63fcdSMarek Szyprowski 	for (i = 0; i < count; ) {
143590cde558SAndre Przywara 		int ret;
143690cde558SAndre Przywara 
14374ce63fcdSMarek Szyprowski 		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
14384ce63fcdSMarek Szyprowski 		phys_addr_t phys = page_to_phys(pages[i]);
14394ce63fcdSMarek Szyprowski 		unsigned int len, j;
14404ce63fcdSMarek Szyprowski 
14414ce63fcdSMarek Szyprowski 		for (j = i + 1; j < count; j++, next_pfn++)
14424ce63fcdSMarek Szyprowski 			if (page_to_pfn(pages[j]) != next_pfn)
14434ce63fcdSMarek Szyprowski 				break;
14444ce63fcdSMarek Szyprowski 
14454ce63fcdSMarek Szyprowski 		len = (j - i) << PAGE_SHIFT;
1446c9b24996SAndreas Herrmann 		ret = iommu_map(mapping->domain, iova, phys, len,
14477d2822dfSSricharan R 				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
14484ce63fcdSMarek Szyprowski 		if (ret < 0)
14494ce63fcdSMarek Szyprowski 			goto fail;
14504ce63fcdSMarek Szyprowski 		iova += len;
14514ce63fcdSMarek Szyprowski 		i = j;
14524ce63fcdSMarek Szyprowski 	}
14534ce63fcdSMarek Szyprowski 	return dma_addr;
14544ce63fcdSMarek Szyprowski fail:
14554ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
14564ce63fcdSMarek Szyprowski 	__free_iova(mapping, dma_addr, size);
14579eef8b8cSChristoph Hellwig 	return ARM_MAPPING_ERROR;
14584ce63fcdSMarek Szyprowski }
14594ce63fcdSMarek Szyprowski 
14604ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
14614ce63fcdSMarek Szyprowski {
146289cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
14634ce63fcdSMarek Szyprowski 
14644ce63fcdSMarek Szyprowski 	/*
14654ce63fcdSMarek Szyprowski 	 * add optional in-page offset from iova to size and align
14664ce63fcdSMarek Szyprowski 	 * result to page size
14674ce63fcdSMarek Szyprowski 	 */
14684ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
14694ce63fcdSMarek Szyprowski 	iova &= PAGE_MASK;
14704ce63fcdSMarek Szyprowski 
14714ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova, size);
14724ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova, size);
14734ce63fcdSMarek Szyprowski 	return 0;
14744ce63fcdSMarek Szyprowski }
14754ce63fcdSMarek Szyprowski 
1476665bad7bSHiroshi Doyu static struct page **__atomic_get_pages(void *addr)
1477665bad7bSHiroshi Doyu {
147836d0fd21SLaura Abbott 	struct page *page;
147936d0fd21SLaura Abbott 	phys_addr_t phys;
1480665bad7bSHiroshi Doyu 
148136d0fd21SLaura Abbott 	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
148236d0fd21SLaura Abbott 	page = phys_to_page(phys);
148336d0fd21SLaura Abbott 
148436d0fd21SLaura Abbott 	return (struct page **)page;
1485665bad7bSHiroshi Doyu }
1486665bad7bSHiroshi Doyu 
148700085f1eSKrzysztof Kozlowski static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1488e9da6e99SMarek Szyprowski {
1489e9da6e99SMarek Szyprowski 	struct vm_struct *area;
1490e9da6e99SMarek Szyprowski 
1491665bad7bSHiroshi Doyu 	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1492665bad7bSHiroshi Doyu 		return __atomic_get_pages(cpu_addr);
1493665bad7bSHiroshi Doyu 
149400085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1495955c757eSMarek Szyprowski 		return cpu_addr;
1496955c757eSMarek Szyprowski 
1497e9da6e99SMarek Szyprowski 	area = find_vm_area(cpu_addr);
1498e9da6e99SMarek Szyprowski 	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1499e9da6e99SMarek Szyprowski 		return area->pages;
1500e9da6e99SMarek Szyprowski 	return NULL;
1501e9da6e99SMarek Szyprowski }
1502e9da6e99SMarek Szyprowski 
150356506822SGregory CLEMENT static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
15047d2822dfSSricharan R 				  dma_addr_t *handle, int coherent_flag,
15057d2822dfSSricharan R 				  unsigned long attrs)
1506479ed93aSHiroshi Doyu {
1507479ed93aSHiroshi Doyu 	struct page *page;
1508479ed93aSHiroshi Doyu 	void *addr;
1509479ed93aSHiroshi Doyu 
151056506822SGregory CLEMENT 	if (coherent_flag  == COHERENT)
151156506822SGregory CLEMENT 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
151256506822SGregory CLEMENT 	else
1513479ed93aSHiroshi Doyu 		addr = __alloc_from_pool(size, &page);
1514479ed93aSHiroshi Doyu 	if (!addr)
1515479ed93aSHiroshi Doyu 		return NULL;
1516479ed93aSHiroshi Doyu 
15177d2822dfSSricharan R 	*handle = __iommu_create_mapping(dev, &page, size, attrs);
15189eef8b8cSChristoph Hellwig 	if (*handle == ARM_MAPPING_ERROR)
1519479ed93aSHiroshi Doyu 		goto err_mapping;
1520479ed93aSHiroshi Doyu 
1521479ed93aSHiroshi Doyu 	return addr;
1522479ed93aSHiroshi Doyu 
1523479ed93aSHiroshi Doyu err_mapping:
1524479ed93aSHiroshi Doyu 	__free_from_pool(addr, size);
1525479ed93aSHiroshi Doyu 	return NULL;
1526479ed93aSHiroshi Doyu }
1527479ed93aSHiroshi Doyu 
1528d5898291SMarek Szyprowski static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
152956506822SGregory CLEMENT 			dma_addr_t handle, size_t size, int coherent_flag)
1530479ed93aSHiroshi Doyu {
1531479ed93aSHiroshi Doyu 	__iommu_remove_mapping(dev, handle, size);
153256506822SGregory CLEMENT 	if (coherent_flag == COHERENT)
153356506822SGregory CLEMENT 		__dma_free_buffer(virt_to_page(cpu_addr), size);
153456506822SGregory CLEMENT 	else
1535d5898291SMarek Szyprowski 		__free_from_pool(cpu_addr, size);
1536479ed93aSHiroshi Doyu }
1537479ed93aSHiroshi Doyu 
153856506822SGregory CLEMENT static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
153900085f1eSKrzysztof Kozlowski 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
154056506822SGregory CLEMENT 	    int coherent_flag)
15414ce63fcdSMarek Szyprowski {
154271b55663SRussell King 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
15434ce63fcdSMarek Szyprowski 	struct page **pages;
15444ce63fcdSMarek Szyprowski 	void *addr = NULL;
15454ce63fcdSMarek Szyprowski 
15469eef8b8cSChristoph Hellwig 	*handle = ARM_MAPPING_ERROR;
15474ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
15484ce63fcdSMarek Szyprowski 
154956506822SGregory CLEMENT 	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
155056506822SGregory CLEMENT 		return __iommu_alloc_simple(dev, size, gfp, handle,
15517d2822dfSSricharan R 					    coherent_flag, attrs);
1552479ed93aSHiroshi Doyu 
15535b91a98cSRichard Zhao 	/*
15545b91a98cSRichard Zhao 	 * Following is a work-around (a.k.a. hack) to prevent pages
15555b91a98cSRichard Zhao 	 * with __GFP_COMP being passed to split_page() which cannot
15565b91a98cSRichard Zhao 	 * handle them.  The real problem is that this flag probably
15575b91a98cSRichard Zhao 	 * should be 0 on ARM as it is not supported on this
15585b91a98cSRichard Zhao 	 * platform; see CONFIG_HUGETLBFS.
15595b91a98cSRichard Zhao 	 */
15605b91a98cSRichard Zhao 	gfp &= ~(__GFP_COMP);
15615b91a98cSRichard Zhao 
156256506822SGregory CLEMENT 	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
15634ce63fcdSMarek Szyprowski 	if (!pages)
15644ce63fcdSMarek Szyprowski 		return NULL;
15654ce63fcdSMarek Szyprowski 
15667d2822dfSSricharan R 	*handle = __iommu_create_mapping(dev, pages, size, attrs);
15679eef8b8cSChristoph Hellwig 	if (*handle == ARM_MAPPING_ERROR)
15684ce63fcdSMarek Szyprowski 		goto err_buffer;
15694ce63fcdSMarek Szyprowski 
157000085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1571955c757eSMarek Szyprowski 		return pages;
1572955c757eSMarek Szyprowski 
1573e9da6e99SMarek Szyprowski 	addr = __iommu_alloc_remap(pages, size, gfp, prot,
1574e9da6e99SMarek Szyprowski 				   __builtin_return_address(0));
15754ce63fcdSMarek Szyprowski 	if (!addr)
15764ce63fcdSMarek Szyprowski 		goto err_mapping;
15774ce63fcdSMarek Szyprowski 
15784ce63fcdSMarek Szyprowski 	return addr;
15794ce63fcdSMarek Szyprowski 
15804ce63fcdSMarek Szyprowski err_mapping:
15814ce63fcdSMarek Szyprowski 	__iommu_remove_mapping(dev, *handle, size);
15824ce63fcdSMarek Szyprowski err_buffer:
1583549a17e4SMarek Szyprowski 	__iommu_free_buffer(dev, pages, size, attrs);
15844ce63fcdSMarek Szyprowski 	return NULL;
15854ce63fcdSMarek Szyprowski }
15864ce63fcdSMarek Szyprowski 
158756506822SGregory CLEMENT static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
158800085f1eSKrzysztof Kozlowski 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
158956506822SGregory CLEMENT {
159056506822SGregory CLEMENT 	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
159156506822SGregory CLEMENT }
159256506822SGregory CLEMENT 
159356506822SGregory CLEMENT static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
159400085f1eSKrzysztof Kozlowski 		    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
159556506822SGregory CLEMENT {
159656506822SGregory CLEMENT 	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
159756506822SGregory CLEMENT }
159856506822SGregory CLEMENT 
159956506822SGregory CLEMENT static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
16004ce63fcdSMarek Szyprowski 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
160100085f1eSKrzysztof Kozlowski 		    unsigned long attrs)
16024ce63fcdSMarek Szyprowski {
16034ce63fcdSMarek Szyprowski 	unsigned long uaddr = vma->vm_start;
16044ce63fcdSMarek Szyprowski 	unsigned long usize = vma->vm_end - vma->vm_start;
1605955c757eSMarek Szyprowski 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1606371f0f08SMarek Szyprowski 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1607371f0f08SMarek Szyprowski 	unsigned long off = vma->vm_pgoff;
1608e9da6e99SMarek Szyprowski 
1609e9da6e99SMarek Szyprowski 	if (!pages)
1610e9da6e99SMarek Szyprowski 		return -ENXIO;
16114ce63fcdSMarek Szyprowski 
1612371f0f08SMarek Szyprowski 	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
1613371f0f08SMarek Szyprowski 		return -ENXIO;
1614371f0f08SMarek Szyprowski 
16157e312103SMarek Szyprowski 	pages += off;
16167e312103SMarek Szyprowski 
16174ce63fcdSMarek Szyprowski 	do {
1618e9da6e99SMarek Szyprowski 		int ret = vm_insert_page(vma, uaddr, *pages++);
16194ce63fcdSMarek Szyprowski 		if (ret) {
1620e9da6e99SMarek Szyprowski 			pr_err("Remapping memory failed: %d\n", ret);
16214ce63fcdSMarek Szyprowski 			return ret;
16224ce63fcdSMarek Szyprowski 		}
16234ce63fcdSMarek Szyprowski 		uaddr += PAGE_SIZE;
16244ce63fcdSMarek Szyprowski 		usize -= PAGE_SIZE;
16254ce63fcdSMarek Szyprowski 	} while (usize > 0);
1626e9da6e99SMarek Szyprowski 
16274ce63fcdSMarek Szyprowski 	return 0;
16284ce63fcdSMarek Szyprowski }
162956506822SGregory CLEMENT static int arm_iommu_mmap_attrs(struct device *dev,
163056506822SGregory CLEMENT 		struct vm_area_struct *vma, void *cpu_addr,
163100085f1eSKrzysztof Kozlowski 		dma_addr_t dma_addr, size_t size, unsigned long attrs)
163256506822SGregory CLEMENT {
163356506822SGregory CLEMENT 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
163456506822SGregory CLEMENT 
163556506822SGregory CLEMENT 	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
163656506822SGregory CLEMENT }
163756506822SGregory CLEMENT 
163856506822SGregory CLEMENT static int arm_coherent_iommu_mmap_attrs(struct device *dev,
163956506822SGregory CLEMENT 		struct vm_area_struct *vma, void *cpu_addr,
164000085f1eSKrzysztof Kozlowski 		dma_addr_t dma_addr, size_t size, unsigned long attrs)
164156506822SGregory CLEMENT {
164256506822SGregory CLEMENT 	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
164356506822SGregory CLEMENT }
16444ce63fcdSMarek Szyprowski 
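/*
 * Example: a hypothetical driver mmap file operation; dma_mmap_attrs()
 * dispatches to arm_iommu_mmap_attrs() above for devices using these ops.
 * The 'struct example_drv' fields are assumptions for illustration.
 */
#if 0	/* illustrative sketch only, not built as part of this file */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_drv *drv = file->private_data;	/* assumed driver data */

	return dma_mmap_attrs(drv->dev, vma, drv->cpu_addr, drv->dma_addr,
			      drv->size, 0);
}
#endif
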
16454ce63fcdSMarek Szyprowski /*
16464ce63fcdSMarek Szyprowski  * Free a buffer allocated by the mapping functions above.
16474ce63fcdSMarek Szyprowski  * Must not be called with IRQs disabled.
16484ce63fcdSMarek Szyprowski  */
164956506822SGregory CLEMENT void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
165000085f1eSKrzysztof Kozlowski 	dma_addr_t handle, unsigned long attrs, int coherent_flag)
16514ce63fcdSMarek Szyprowski {
1652836bfa0dSYoungJun Cho 	struct page **pages;
16534ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
16544ce63fcdSMarek Szyprowski 
165556506822SGregory CLEMENT 	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
165656506822SGregory CLEMENT 		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
1657479ed93aSHiroshi Doyu 		return;
1658479ed93aSHiroshi Doyu 	}
1659479ed93aSHiroshi Doyu 
1660836bfa0dSYoungJun Cho 	pages = __iommu_get_pages(cpu_addr, attrs);
1661836bfa0dSYoungJun Cho 	if (!pages) {
1662836bfa0dSYoungJun Cho 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1663836bfa0dSYoungJun Cho 		return;
1664836bfa0dSYoungJun Cho 	}
1665836bfa0dSYoungJun Cho 
166600085f1eSKrzysztof Kozlowski 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
1667513510ddSLaura Abbott 		dma_common_free_remap(cpu_addr, size,
1668513510ddSLaura Abbott 			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
1669955c757eSMarek Szyprowski 	}
1670e9da6e99SMarek Szyprowski 
16714ce63fcdSMarek Szyprowski 	__iommu_remove_mapping(dev, handle, size);
1672549a17e4SMarek Szyprowski 	__iommu_free_buffer(dev, pages, size, attrs);
16734ce63fcdSMarek Szyprowski }
16744ce63fcdSMarek Szyprowski 
167556506822SGregory CLEMENT void arm_iommu_free_attrs(struct device *dev, size_t size,
167600085f1eSKrzysztof Kozlowski 		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
167756506822SGregory CLEMENT {
167856506822SGregory CLEMENT 	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
167956506822SGregory CLEMENT }
168056506822SGregory CLEMENT 
168156506822SGregory CLEMENT void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
168200085f1eSKrzysztof Kozlowski 		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
168356506822SGregory CLEMENT {
168456506822SGregory CLEMENT 	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
168556506822SGregory CLEMENT }
168656506822SGregory CLEMENT 
1687dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1688dc2832e1SMarek Szyprowski 				 void *cpu_addr, dma_addr_t dma_addr,
168900085f1eSKrzysztof Kozlowski 				 size_t size, unsigned long attrs)
1690dc2832e1SMarek Szyprowski {
1691dc2832e1SMarek Szyprowski 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1692dc2832e1SMarek Szyprowski 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1693dc2832e1SMarek Szyprowski 
1694dc2832e1SMarek Szyprowski 	if (!pages)
1695dc2832e1SMarek Szyprowski 		return -ENXIO;
1696dc2832e1SMarek Szyprowski 
1697dc2832e1SMarek Szyprowski 	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1698dc2832e1SMarek Szyprowski 					 GFP_KERNEL);
16994ce63fcdSMarek Szyprowski }
17004ce63fcdSMarek Szyprowski 
17014ce63fcdSMarek Szyprowski /*
17024ce63fcdSMarek Szyprowski  * Map a part of the scatter-gather list into a contiguous IO address space
17034ce63fcdSMarek Szyprowski  */
17044ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
17054ce63fcdSMarek Szyprowski 			  size_t size, dma_addr_t *handle,
170600085f1eSKrzysztof Kozlowski 			  enum dma_data_direction dir, unsigned long attrs,
17070fa478dfSRob Herring 			  bool is_coherent)
17084ce63fcdSMarek Szyprowski {
170989cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
17104ce63fcdSMarek Szyprowski 	dma_addr_t iova, iova_base;
17114ce63fcdSMarek Szyprowski 	int ret = 0;
17124ce63fcdSMarek Szyprowski 	unsigned int count;
17134ce63fcdSMarek Szyprowski 	struct scatterlist *s;
1714c9b24996SAndreas Herrmann 	int prot;
17154ce63fcdSMarek Szyprowski 
17164ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
17179eef8b8cSChristoph Hellwig 	*handle = ARM_MAPPING_ERROR;
17184ce63fcdSMarek Szyprowski 
17194ce63fcdSMarek Szyprowski 	iova_base = iova = __alloc_iova(mapping, size);
17209eef8b8cSChristoph Hellwig 	if (iova == ARM_MAPPING_ERROR)
17214ce63fcdSMarek Szyprowski 		return -ENOMEM;
17224ce63fcdSMarek Szyprowski 
17234ce63fcdSMarek Szyprowski 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
17243e6110fdSDan Williams 		phys_addr_t phys = page_to_phys(sg_page(s));
17254ce63fcdSMarek Szyprowski 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
17264ce63fcdSMarek Szyprowski 
172700085f1eSKrzysztof Kozlowski 		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
17284ce63fcdSMarek Szyprowski 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
17294ce63fcdSMarek Szyprowski 
17307d2822dfSSricharan R 		prot = __dma_info_to_prot(dir, attrs);
1731c9b24996SAndreas Herrmann 
1732c9b24996SAndreas Herrmann 		ret = iommu_map(mapping->domain, iova, phys, len, prot);
17334ce63fcdSMarek Szyprowski 		if (ret < 0)
17344ce63fcdSMarek Szyprowski 			goto fail;
17354ce63fcdSMarek Szyprowski 		count += len >> PAGE_SHIFT;
17364ce63fcdSMarek Szyprowski 		iova += len;
17374ce63fcdSMarek Szyprowski 	}
17384ce63fcdSMarek Szyprowski 	*handle = iova_base;
17394ce63fcdSMarek Szyprowski 
17404ce63fcdSMarek Szyprowski 	return 0;
17414ce63fcdSMarek Szyprowski fail:
17424ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
17434ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova_base, size);
17444ce63fcdSMarek Szyprowski 	return ret;
17454ce63fcdSMarek Szyprowski }
17464ce63fcdSMarek Szyprowski 
17470fa478dfSRob Herring static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
174800085f1eSKrzysztof Kozlowski 		     enum dma_data_direction dir, unsigned long attrs,
17490fa478dfSRob Herring 		     bool is_coherent)
17504ce63fcdSMarek Szyprowski {
17514ce63fcdSMarek Szyprowski 	struct scatterlist *s = sg, *dma = sg, *start = sg;
17524ce63fcdSMarek Szyprowski 	int i, count = 0;
17534ce63fcdSMarek Szyprowski 	unsigned int offset = s->offset;
17544ce63fcdSMarek Szyprowski 	unsigned int size = s->offset + s->length;
17554ce63fcdSMarek Szyprowski 	unsigned int max = dma_get_max_seg_size(dev);
17564ce63fcdSMarek Szyprowski 
17574ce63fcdSMarek Szyprowski 	for (i = 1; i < nents; i++) {
17584ce63fcdSMarek Szyprowski 		s = sg_next(s);
17594ce63fcdSMarek Szyprowski 
17609eef8b8cSChristoph Hellwig 		s->dma_address = ARM_MAPPING_ERROR;
17614ce63fcdSMarek Szyprowski 		s->dma_length = 0;
17624ce63fcdSMarek Szyprowski 
17634ce63fcdSMarek Szyprowski 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
17644ce63fcdSMarek Szyprowski 			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
17650fa478dfSRob Herring 			    dir, attrs, is_coherent) < 0)
17664ce63fcdSMarek Szyprowski 				goto bad_mapping;
17674ce63fcdSMarek Szyprowski 
17684ce63fcdSMarek Szyprowski 			dma->dma_address += offset;
17694ce63fcdSMarek Szyprowski 			dma->dma_length = size - offset;
17704ce63fcdSMarek Szyprowski 
17714ce63fcdSMarek Szyprowski 			size = offset = s->offset;
17724ce63fcdSMarek Szyprowski 			start = s;
17734ce63fcdSMarek Szyprowski 			dma = sg_next(dma);
17744ce63fcdSMarek Szyprowski 			count += 1;
17754ce63fcdSMarek Szyprowski 		}
17764ce63fcdSMarek Szyprowski 		size += s->length;
17774ce63fcdSMarek Szyprowski 	}
17780fa478dfSRob Herring 	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
17790fa478dfSRob Herring 		is_coherent) < 0)
17804ce63fcdSMarek Szyprowski 		goto bad_mapping;
17814ce63fcdSMarek Szyprowski 
17824ce63fcdSMarek Szyprowski 	dma->dma_address += offset;
17834ce63fcdSMarek Szyprowski 	dma->dma_length = size - offset;
17844ce63fcdSMarek Szyprowski 
17854ce63fcdSMarek Szyprowski 	return count+1;
17864ce63fcdSMarek Szyprowski 
17874ce63fcdSMarek Szyprowski bad_mapping:
17884ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, count, i)
17894ce63fcdSMarek Szyprowski 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
17904ce63fcdSMarek Szyprowski 	return 0;
17914ce63fcdSMarek Szyprowski }
17924ce63fcdSMarek Szyprowski 
17934ce63fcdSMarek Szyprowski /**
17940fa478dfSRob Herring  * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
17950fa478dfSRob Herring  * @dev: valid struct device pointer
17960fa478dfSRob Herring  * @sg: list of buffers
17970fa478dfSRob Herring  * @nents: number of buffers to map
17980fa478dfSRob Herring  * @dir: DMA transfer direction
17990fa478dfSRob Herring  *
18000fa478dfSRob Herring  * Map a set of i/o coherent buffers described by scatterlist in streaming
18010fa478dfSRob Herring  * mode for DMA. The scatter gather list elements are merged together (if
18020fa478dfSRob Herring  * possible) and tagged with the appropriate dma address and length. They are
18030fa478dfSRob Herring  * obtained via sg_dma_{address,length}.
18040fa478dfSRob Herring  */
18050fa478dfSRob Herring int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
180600085f1eSKrzysztof Kozlowski 		int nents, enum dma_data_direction dir, unsigned long attrs)
18070fa478dfSRob Herring {
18080fa478dfSRob Herring 	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
18090fa478dfSRob Herring }
18100fa478dfSRob Herring 
18110fa478dfSRob Herring /**
18120fa478dfSRob Herring  * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
18130fa478dfSRob Herring  * @dev: valid struct device pointer
18140fa478dfSRob Herring  * @sg: list of buffers
18150fa478dfSRob Herring  * @nents: number of buffers to map
18160fa478dfSRob Herring  * @dir: DMA transfer direction
18170fa478dfSRob Herring  *
18180fa478dfSRob Herring  * Map a set of buffers described by scatterlist in streaming mode for DMA.
18190fa478dfSRob Herring  * The scatter gather list elements are merged together (if possible) and
18200fa478dfSRob Herring  * tagged with the appropriate dma address and length. They are obtained via
18210fa478dfSRob Herring  * sg_dma_{address,length}.
18220fa478dfSRob Herring  */
18230fa478dfSRob Herring int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
182400085f1eSKrzysztof Kozlowski 		int nents, enum dma_data_direction dir, unsigned long attrs)
18250fa478dfSRob Herring {
18260fa478dfSRob Herring 	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
18270fa478dfSRob Herring }
18280fa478dfSRob Herring 
18290fa478dfSRob Herring static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
183000085f1eSKrzysztof Kozlowski 		int nents, enum dma_data_direction dir,
183100085f1eSKrzysztof Kozlowski 		unsigned long attrs, bool is_coherent)
18320fa478dfSRob Herring {
18330fa478dfSRob Herring 	struct scatterlist *s;
18340fa478dfSRob Herring 	int i;
18350fa478dfSRob Herring 
18360fa478dfSRob Herring 	for_each_sg(sg, s, nents, i) {
18370fa478dfSRob Herring 		if (sg_dma_len(s))
18380fa478dfSRob Herring 			__iommu_remove_mapping(dev, sg_dma_address(s),
18390fa478dfSRob Herring 					       sg_dma_len(s));
184000085f1eSKrzysztof Kozlowski 		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
18410fa478dfSRob Herring 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
18420fa478dfSRob Herring 					      s->length, dir);
18430fa478dfSRob Herring 	}
18440fa478dfSRob Herring }
18450fa478dfSRob Herring 
18460fa478dfSRob Herring /**
18470fa478dfSRob Herring  * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
18480fa478dfSRob Herring  * @dev: valid struct device pointer
18490fa478dfSRob Herring  * @sg: list of buffers
18500fa478dfSRob Herring  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
18510fa478dfSRob Herring  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
18520fa478dfSRob Herring  *
18530fa478dfSRob Herring  * Unmap a set of streaming mode DMA translations.  Again, CPU access
18540fa478dfSRob Herring  * rules concerning calls here are the same as for dma_unmap_single().
18550fa478dfSRob Herring  */
18560fa478dfSRob Herring void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
185700085f1eSKrzysztof Kozlowski 		int nents, enum dma_data_direction dir,
185800085f1eSKrzysztof Kozlowski 		unsigned long attrs)
18590fa478dfSRob Herring {
18600fa478dfSRob Herring 	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
18610fa478dfSRob Herring }
18620fa478dfSRob Herring 
18630fa478dfSRob Herring /**
18644ce63fcdSMarek Szyprowski  * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
18654ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
18664ce63fcdSMarek Szyprowski  * @sg: list of buffers
18674ce63fcdSMarek Szyprowski  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
18684ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
18694ce63fcdSMarek Szyprowski  *
18704ce63fcdSMarek Szyprowski  * Unmap a set of streaming mode DMA translations.  Again, CPU access
18714ce63fcdSMarek Szyprowski  * rules concerning calls here are the same as for dma_unmap_single().
18724ce63fcdSMarek Szyprowski  */
18734ce63fcdSMarek Szyprowski void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
187400085f1eSKrzysztof Kozlowski 			enum dma_data_direction dir,
187500085f1eSKrzysztof Kozlowski 			unsigned long attrs)
18764ce63fcdSMarek Szyprowski {
18770fa478dfSRob Herring 	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
18784ce63fcdSMarek Szyprowski }
18794ce63fcdSMarek Szyprowski 
18804ce63fcdSMarek Szyprowski /**
18814ce63fcdSMarek Szyprowski  * arm_iommu_sync_sg_for_cpu
18824ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
18834ce63fcdSMarek Szyprowski  * @sg: list of buffers
18844ce63fcdSMarek Szyprowski  * @nents: number of buffers to map (returned from dma_map_sg)
18854ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
18864ce63fcdSMarek Szyprowski  */
18874ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
18884ce63fcdSMarek Szyprowski 			int nents, enum dma_data_direction dir)
18894ce63fcdSMarek Szyprowski {
18904ce63fcdSMarek Szyprowski 	struct scatterlist *s;
18914ce63fcdSMarek Szyprowski 	int i;
18924ce63fcdSMarek Szyprowski 
18934ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, nents, i)
18944ce63fcdSMarek Szyprowski 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
18954ce63fcdSMarek Szyprowski 
18964ce63fcdSMarek Szyprowski }
18974ce63fcdSMarek Szyprowski 
18984ce63fcdSMarek Szyprowski /**
18994ce63fcdSMarek Szyprowski  * arm_iommu_sync_sg_for_device
19004ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
19014ce63fcdSMarek Szyprowski  * @sg: list of buffers
19024ce63fcdSMarek Szyprowski  * @nents: number of buffers to map (returned from dma_map_sg)
19034ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
19044ce63fcdSMarek Szyprowski  */
19054ce63fcdSMarek Szyprowski void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
19064ce63fcdSMarek Szyprowski 			int nents, enum dma_data_direction dir)
19074ce63fcdSMarek Szyprowski {
19084ce63fcdSMarek Szyprowski 	struct scatterlist *s;
19094ce63fcdSMarek Szyprowski 	int i;
19104ce63fcdSMarek Szyprowski 
19114ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, nents, i)
19124ce63fcdSMarek Szyprowski 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
19134ce63fcdSMarek Szyprowski }
19144ce63fcdSMarek Szyprowski 
19154ce63fcdSMarek Szyprowski 
19164ce63fcdSMarek Szyprowski /**
19170fa478dfSRob Herring  * arm_coherent_iommu_map_page
19180fa478dfSRob Herring  * @dev: valid struct device pointer
19190fa478dfSRob Herring  * @page: page that buffer resides in
19200fa478dfSRob Herring  * @offset: offset into page for start of buffer
19210fa478dfSRob Herring  * @size: size of buffer to map
19220fa478dfSRob Herring  * @dir: DMA transfer direction
19230fa478dfSRob Herring  *
19240fa478dfSRob Herring  * Coherent IOMMU aware version of arm_dma_map_page()
19250fa478dfSRob Herring  */
19260fa478dfSRob Herring static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
19270fa478dfSRob Herring 	     unsigned long offset, size_t size, enum dma_data_direction dir,
192800085f1eSKrzysztof Kozlowski 	     unsigned long attrs)
19290fa478dfSRob Herring {
193089cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
19310fa478dfSRob Herring 	dma_addr_t dma_addr;
193213987d68SWill Deacon 	int ret, prot, len = PAGE_ALIGN(size + offset);
19330fa478dfSRob Herring 
19340fa478dfSRob Herring 	dma_addr = __alloc_iova(mapping, len);
19359eef8b8cSChristoph Hellwig 	if (dma_addr == ARM_MAPPING_ERROR)
19360fa478dfSRob Herring 		return dma_addr;
19370fa478dfSRob Herring 
19387d2822dfSSricharan R 	prot = __dma_info_to_prot(dir, attrs);
193913987d68SWill Deacon 
194013987d68SWill Deacon 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
19410fa478dfSRob Herring 	if (ret < 0)
19420fa478dfSRob Herring 		goto fail;
19430fa478dfSRob Herring 
19440fa478dfSRob Herring 	return dma_addr + offset;
19450fa478dfSRob Herring fail:
19460fa478dfSRob Herring 	__free_iova(mapping, dma_addr, len);
19479eef8b8cSChristoph Hellwig 	return ARM_MAPPING_ERROR;
19480fa478dfSRob Herring }
19490fa478dfSRob Herring 
19500fa478dfSRob Herring /**
19514ce63fcdSMarek Szyprowski  * arm_iommu_map_page
19524ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
19534ce63fcdSMarek Szyprowski  * @page: page that buffer resides in
19544ce63fcdSMarek Szyprowski  * @offset: offset into page for start of buffer
19554ce63fcdSMarek Szyprowski  * @size: size of buffer to map
19564ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction
19574ce63fcdSMarek Szyprowski  *
19584ce63fcdSMarek Szyprowski  * IOMMU aware version of arm_dma_map_page()
19594ce63fcdSMarek Szyprowski  */
19604ce63fcdSMarek Szyprowski static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
19614ce63fcdSMarek Szyprowski 	     unsigned long offset, size_t size, enum dma_data_direction dir,
196200085f1eSKrzysztof Kozlowski 	     unsigned long attrs)
19634ce63fcdSMarek Szyprowski {
196400085f1eSKrzysztof Kozlowski 	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
19654ce63fcdSMarek Szyprowski 		__dma_page_cpu_to_dev(page, offset, size, dir);
19664ce63fcdSMarek Szyprowski 
19670fa478dfSRob Herring 	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
19680fa478dfSRob Herring }
19694ce63fcdSMarek Szyprowski 
19700fa478dfSRob Herring /**
19710fa478dfSRob Herring  * arm_coherent_iommu_unmap_page
19720fa478dfSRob Herring  * @dev: valid struct device pointer
19730fa478dfSRob Herring  * @handle: DMA address of buffer
19740fa478dfSRob Herring  * @size: size of buffer (same as passed to dma_map_page)
19750fa478dfSRob Herring  * @dir: DMA transfer direction (same as passed to dma_map_page)
19760fa478dfSRob Herring  *
19770fa478dfSRob Herring  * Coherent IOMMU aware version of arm_dma_unmap_page()
19780fa478dfSRob Herring  */
19790fa478dfSRob Herring static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
198000085f1eSKrzysztof Kozlowski 		size_t size, enum dma_data_direction dir, unsigned long attrs)
19810fa478dfSRob Herring {
198289cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
19830fa478dfSRob Herring 	dma_addr_t iova = handle & PAGE_MASK;
19840fa478dfSRob Herring 	int offset = handle & ~PAGE_MASK;
19850fa478dfSRob Herring 	int len = PAGE_ALIGN(size + offset);
19864ce63fcdSMarek Szyprowski 
19870fa478dfSRob Herring 	if (!iova)
19880fa478dfSRob Herring 		return;
19890fa478dfSRob Herring 
19900fa478dfSRob Herring 	iommu_unmap(mapping->domain, iova, len);
19910fa478dfSRob Herring 	__free_iova(mapping, iova, len);
19924ce63fcdSMarek Szyprowski }
19934ce63fcdSMarek Szyprowski 
19944ce63fcdSMarek Szyprowski /**
19954ce63fcdSMarek Szyprowski  * arm_iommu_unmap_page
19964ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
19974ce63fcdSMarek Szyprowski  * @handle: DMA address of buffer
19984ce63fcdSMarek Szyprowski  * @size: size of buffer (same as passed to dma_map_page)
19994ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as passed to dma_map_page)
20004ce63fcdSMarek Szyprowski  *
20014ce63fcdSMarek Szyprowski  * IOMMU aware version of arm_dma_unmap_page()
20024ce63fcdSMarek Szyprowski  */
20034ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
200400085f1eSKrzysztof Kozlowski 		size_t size, enum dma_data_direction dir, unsigned long attrs)
20054ce63fcdSMarek Szyprowski {
200689cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
20074ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
20084ce63fcdSMarek Szyprowski 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
20094ce63fcdSMarek Szyprowski 	int offset = handle & ~PAGE_MASK;
20104ce63fcdSMarek Szyprowski 	int len = PAGE_ALIGN(size + offset);
20114ce63fcdSMarek Szyprowski 
20124ce63fcdSMarek Szyprowski 	if (!iova)
20134ce63fcdSMarek Szyprowski 		return;
20144ce63fcdSMarek Szyprowski 
201500085f1eSKrzysztof Kozlowski 	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
20164ce63fcdSMarek Szyprowski 		__dma_page_dev_to_cpu(page, offset, size, dir);
20174ce63fcdSMarek Szyprowski 
20184ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova, len);
20194ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova, len);
20204ce63fcdSMarek Szyprowski }
20214ce63fcdSMarek Szyprowski 
202224ed5d2cSNiklas Söderlund /**
202324ed5d2cSNiklas Söderlund  * arm_iommu_map_resource - map a device resource for DMA
202424ed5d2cSNiklas Söderlund  * @dev: valid struct device pointer
202524ed5d2cSNiklas Söderlund  * @phys_addr: physical address of resource
202624ed5d2cSNiklas Söderlund  * @size: size of resource to map
202724ed5d2cSNiklas Söderlund  * @dir: DMA transfer direction
202824ed5d2cSNiklas Söderlund  */
202924ed5d2cSNiklas Söderlund static dma_addr_t arm_iommu_map_resource(struct device *dev,
203024ed5d2cSNiklas Söderlund 		phys_addr_t phys_addr, size_t size,
203124ed5d2cSNiklas Söderlund 		enum dma_data_direction dir, unsigned long attrs)
203224ed5d2cSNiklas Söderlund {
203324ed5d2cSNiklas Söderlund 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
203424ed5d2cSNiklas Söderlund 	dma_addr_t dma_addr;
203524ed5d2cSNiklas Söderlund 	int ret, prot;
203624ed5d2cSNiklas Söderlund 	phys_addr_t addr = phys_addr & PAGE_MASK;
203724ed5d2cSNiklas Söderlund 	unsigned int offset = phys_addr & ~PAGE_MASK;
203824ed5d2cSNiklas Söderlund 	size_t len = PAGE_ALIGN(size + offset);
203924ed5d2cSNiklas Söderlund 
204024ed5d2cSNiklas Söderlund 	dma_addr = __alloc_iova(mapping, len);
20419eef8b8cSChristoph Hellwig 	if (dma_addr == ARM_MAPPING_ERROR)
204224ed5d2cSNiklas Söderlund 		return dma_addr;
204324ed5d2cSNiklas Söderlund 
20447d2822dfSSricharan R 	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
204524ed5d2cSNiklas Söderlund 
204624ed5d2cSNiklas Söderlund 	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
204724ed5d2cSNiklas Söderlund 	if (ret < 0)
204824ed5d2cSNiklas Söderlund 		goto fail;
204924ed5d2cSNiklas Söderlund 
205024ed5d2cSNiklas Söderlund 	return dma_addr + offset;
205124ed5d2cSNiklas Söderlund fail:
205224ed5d2cSNiklas Söderlund 	__free_iova(mapping, dma_addr, len);
20539eef8b8cSChristoph Hellwig 	return ARM_MAPPING_ERROR;
205424ed5d2cSNiklas Söderlund }
205524ed5d2cSNiklas Söderlund 
205624ed5d2cSNiklas Söderlund /**
205724ed5d2cSNiklas Söderlund  * arm_iommu_unmap_resource - unmap a device DMA resource
205824ed5d2cSNiklas Söderlund  * @dev: valid struct device pointer
205924ed5d2cSNiklas Söderlund  * @dma_handle: DMA address to resource
206024ed5d2cSNiklas Söderlund  * @size: size of resource to map
206124ed5d2cSNiklas Söderlund  * @dir: DMA transfer direction
206224ed5d2cSNiklas Söderlund  */
206324ed5d2cSNiklas Söderlund static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
206424ed5d2cSNiklas Söderlund 		size_t size, enum dma_data_direction dir,
206524ed5d2cSNiklas Söderlund 		unsigned long attrs)
206624ed5d2cSNiklas Söderlund {
206724ed5d2cSNiklas Söderlund 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
206824ed5d2cSNiklas Söderlund 	dma_addr_t iova = dma_handle & PAGE_MASK;
206924ed5d2cSNiklas Söderlund 	unsigned int offset = dma_handle & ~PAGE_MASK;
207024ed5d2cSNiklas Söderlund 	size_t len = PAGE_ALIGN(size + offset);
207124ed5d2cSNiklas Söderlund 
207224ed5d2cSNiklas Söderlund 	if (!iova)
207324ed5d2cSNiklas Söderlund 		return;
207424ed5d2cSNiklas Söderlund 
207524ed5d2cSNiklas Söderlund 	iommu_unmap(mapping->domain, iova, len);
207624ed5d2cSNiklas Söderlund 	__free_iova(mapping, iova, len);
207724ed5d2cSNiklas Söderlund }
207824ed5d2cSNiklas Söderlund 
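/*
 * Usage sketch (editor's addition, not part of the original file): once the
 * map_resource/unmap_resource callbacks above are wired into the dma_map_ops
 * below, a client driver reaches them through the generic DMA API.  Here
 * "dev" is the (hypothetical) client device and "fifo_phys" the physical
 * address of a peripheral MMIO FIFO; both are assumptions for illustration.
 *
 *	dma_addr_t fifo_dma;
 *
 *	fifo_dma = dma_map_resource(dev, fifo_phys, SZ_4K,
 *				    DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, fifo_dma))
 *		return -EIO;
 *
 *	(program fifo_dma into the DMA engine, run the transfer, then
 *	 tear the mapping down again)
 *
 *	dma_unmap_resource(dev, fifo_dma, SZ_4K, DMA_TO_DEVICE, 0);
 */
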
20794ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev,
20804ce63fcdSMarek Szyprowski 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
20814ce63fcdSMarek Szyprowski {
208289cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
20834ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
20844ce63fcdSMarek Szyprowski 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
20854ce63fcdSMarek Szyprowski 	unsigned int offset = handle & ~PAGE_MASK;
20864ce63fcdSMarek Szyprowski 
20874ce63fcdSMarek Szyprowski 	if (!iova)
20884ce63fcdSMarek Szyprowski 		return;
20894ce63fcdSMarek Szyprowski 
20904ce63fcdSMarek Szyprowski 	__dma_page_dev_to_cpu(page, offset, size, dir);
20914ce63fcdSMarek Szyprowski }
20924ce63fcdSMarek Szyprowski 
20934ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev,
20944ce63fcdSMarek Szyprowski 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
20954ce63fcdSMarek Szyprowski {
209689cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
20974ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
20984ce63fcdSMarek Szyprowski 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
20994ce63fcdSMarek Szyprowski 	unsigned int offset = handle & ~PAGE_MASK;
21004ce63fcdSMarek Szyprowski 
21014ce63fcdSMarek Szyprowski 	if (!iova)
21024ce63fcdSMarek Szyprowski 		return;
21034ce63fcdSMarek Szyprowski 
21044ce63fcdSMarek Szyprowski 	__dma_page_cpu_to_dev(page, offset, size, dir);
21054ce63fcdSMarek Szyprowski }
21064ce63fcdSMarek Szyprowski 
21075299709dSBart Van Assche const struct dma_map_ops iommu_ops = {
21084ce63fcdSMarek Szyprowski 	.alloc		= arm_iommu_alloc_attrs,
21094ce63fcdSMarek Szyprowski 	.free		= arm_iommu_free_attrs,
21104ce63fcdSMarek Szyprowski 	.mmap		= arm_iommu_mmap_attrs,
2111dc2832e1SMarek Szyprowski 	.get_sgtable	= arm_iommu_get_sgtable,
21124ce63fcdSMarek Szyprowski 
21134ce63fcdSMarek Szyprowski 	.map_page		= arm_iommu_map_page,
21144ce63fcdSMarek Szyprowski 	.unmap_page		= arm_iommu_unmap_page,
21154ce63fcdSMarek Szyprowski 	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
21164ce63fcdSMarek Szyprowski 	.sync_single_for_device	= arm_iommu_sync_single_for_device,
21174ce63fcdSMarek Szyprowski 
21184ce63fcdSMarek Szyprowski 	.map_sg			= arm_iommu_map_sg,
21194ce63fcdSMarek Szyprowski 	.unmap_sg		= arm_iommu_unmap_sg,
21204ce63fcdSMarek Szyprowski 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
21214ce63fcdSMarek Szyprowski 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
212224ed5d2cSNiklas Söderlund 
212324ed5d2cSNiklas Söderlund 	.map_resource		= arm_iommu_map_resource,
212424ed5d2cSNiklas Söderlund 	.unmap_resource		= arm_iommu_unmap_resource,
21259eef8b8cSChristoph Hellwig 
21269eef8b8cSChristoph Hellwig 	.mapping_error		= arm_dma_mapping_error,
2127418a7a7eSChristoph Hellwig 	.dma_supported		= arm_dma_supported,
21284ce63fcdSMarek Szyprowski };
21294ce63fcdSMarek Szyprowski 
21305299709dSBart Van Assche const struct dma_map_ops iommu_coherent_ops = {
213156506822SGregory CLEMENT 	.alloc		= arm_coherent_iommu_alloc_attrs,
213256506822SGregory CLEMENT 	.free		= arm_coherent_iommu_free_attrs,
213356506822SGregory CLEMENT 	.mmap		= arm_coherent_iommu_mmap_attrs,
21340fa478dfSRob Herring 	.get_sgtable	= arm_iommu_get_sgtable,
21350fa478dfSRob Herring 
21360fa478dfSRob Herring 	.map_page	= arm_coherent_iommu_map_page,
21370fa478dfSRob Herring 	.unmap_page	= arm_coherent_iommu_unmap_page,
21380fa478dfSRob Herring 
21390fa478dfSRob Herring 	.map_sg		= arm_coherent_iommu_map_sg,
21400fa478dfSRob Herring 	.unmap_sg	= arm_coherent_iommu_unmap_sg,
214124ed5d2cSNiklas Söderlund 
214224ed5d2cSNiklas Söderlund 	.map_resource	= arm_iommu_map_resource,
214324ed5d2cSNiklas Söderlund 	.unmap_resource	= arm_iommu_unmap_resource,
21449eef8b8cSChristoph Hellwig 
21459eef8b8cSChristoph Hellwig 	.mapping_error		= arm_dma_mapping_error,
2146418a7a7eSChristoph Hellwig 	.dma_supported		= arm_dma_supported,
21470fa478dfSRob Herring };
21480fa478dfSRob Herring 
21494ce63fcdSMarek Szyprowski /**
21504ce63fcdSMarek Szyprowski  * arm_iommu_create_mapping
21514ce63fcdSMarek Szyprowski  * @bus: pointer to the bus holding the client device (for IOMMU calls)
21524ce63fcdSMarek Szyprowski  * @base: start address of the valid IO address space
215368efd7d2SMarek Szyprowski  * @size: maximum size of the valid IO address space
21544ce63fcdSMarek Szyprowski  *
21554ce63fcdSMarek Szyprowski  * Creates a mapping structure which holds information about used/unused
21564ce63fcdSMarek Szyprowski  * IO address ranges; this is required to perform memory allocation and
21574ce63fcdSMarek Szyprowski  * mapping with IOMMU-aware functions.
21584ce63fcdSMarek Szyprowski  *
21594ce63fcdSMarek Szyprowski  * The client device needs to be attached to the mapping with the
21604ce63fcdSMarek Szyprowski  * arm_iommu_attach_device() function.
21614ce63fcdSMarek Szyprowski  */
21624ce63fcdSMarek Szyprowski struct dma_iommu_mapping *
21631424532bSMarek Szyprowski arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
21644ce63fcdSMarek Szyprowski {
216568efd7d2SMarek Szyprowski 	unsigned int bits = size >> PAGE_SHIFT;
216668efd7d2SMarek Szyprowski 	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
21674ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping;
216868efd7d2SMarek Szyprowski 	int extensions = 1;
21694ce63fcdSMarek Szyprowski 	int err = -ENOMEM;
21704ce63fcdSMarek Szyprowski 
21711424532bSMarek Szyprowski 	/* currently only 32-bit DMA address space is supported */
21721424532bSMarek Szyprowski 	if (size > DMA_BIT_MASK(32) + 1)
21731424532bSMarek Szyprowski 		return ERR_PTR(-ERANGE);
21741424532bSMarek Szyprowski 
217568efd7d2SMarek Szyprowski 	if (!bitmap_size)
21764ce63fcdSMarek Szyprowski 		return ERR_PTR(-EINVAL);
21774ce63fcdSMarek Szyprowski 
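	/*
	 * The IOVA allocator bitmap is capped at one page; a larger address
	 * space is covered by up to 'extensions' further page-sized bitmaps,
	 * which extend_iommu_mapping() allocates on demand.
	 */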
217868efd7d2SMarek Szyprowski 	if (bitmap_size > PAGE_SIZE) {
217968efd7d2SMarek Szyprowski 		extensions = bitmap_size / PAGE_SIZE;
218068efd7d2SMarek Szyprowski 		bitmap_size = PAGE_SIZE;
218168efd7d2SMarek Szyprowski 	}
218268efd7d2SMarek Szyprowski 
21834ce63fcdSMarek Szyprowski 	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
21844ce63fcdSMarek Szyprowski 	if (!mapping)
21854ce63fcdSMarek Szyprowski 		goto err;
21864ce63fcdSMarek Szyprowski 
218768efd7d2SMarek Szyprowski 	mapping->bitmap_size = bitmap_size;
218868efd7d2SMarek Szyprowski 	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
21894d852ef8SAndreas Herrmann 				GFP_KERNEL);
21904d852ef8SAndreas Herrmann 	if (!mapping->bitmaps)
21914ce63fcdSMarek Szyprowski 		goto err2;
21924ce63fcdSMarek Szyprowski 
219368efd7d2SMarek Szyprowski 	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
21944d852ef8SAndreas Herrmann 	if (!mapping->bitmaps[0])
21954d852ef8SAndreas Herrmann 		goto err3;
21964d852ef8SAndreas Herrmann 
21974d852ef8SAndreas Herrmann 	mapping->nr_bitmaps = 1;
21984d852ef8SAndreas Herrmann 	mapping->extensions = extensions;
21994ce63fcdSMarek Szyprowski 	mapping->base = base;
220068efd7d2SMarek Szyprowski 	mapping->bits = BITS_PER_BYTE * bitmap_size;
22014d852ef8SAndreas Herrmann 
22024ce63fcdSMarek Szyprowski 	spin_lock_init(&mapping->lock);
22034ce63fcdSMarek Szyprowski 
22044ce63fcdSMarek Szyprowski 	mapping->domain = iommu_domain_alloc(bus);
22054ce63fcdSMarek Szyprowski 	if (!mapping->domain)
22064d852ef8SAndreas Herrmann 		goto err4;
22074ce63fcdSMarek Szyprowski 
22084ce63fcdSMarek Szyprowski 	kref_init(&mapping->kref);
22094ce63fcdSMarek Szyprowski 	return mapping;
22104d852ef8SAndreas Herrmann err4:
22114d852ef8SAndreas Herrmann 	kfree(mapping->bitmaps[0]);
22124ce63fcdSMarek Szyprowski err3:
22134d852ef8SAndreas Herrmann 	kfree(mapping->bitmaps);
22144ce63fcdSMarek Szyprowski err2:
22154ce63fcdSMarek Szyprowski 	kfree(mapping);
22164ce63fcdSMarek Szyprowski err:
22174ce63fcdSMarek Szyprowski 	return ERR_PTR(err);
22184ce63fcdSMarek Szyprowski }
221918177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
22204ce63fcdSMarek Szyprowski 
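/*
 * Usage sketch (editor's addition): a bus or driver layer would typically
 * reserve a chunk of IO virtual address space like this before attaching
 * devices.  The base address and size below are hypothetical.
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_256M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 */
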
22214ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref)
22224ce63fcdSMarek Szyprowski {
22234d852ef8SAndreas Herrmann 	int i;
22244ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping =
22254ce63fcdSMarek Szyprowski 		container_of(kref, struct dma_iommu_mapping, kref);
22264ce63fcdSMarek Szyprowski 
22274ce63fcdSMarek Szyprowski 	iommu_domain_free(mapping->domain);
22284d852ef8SAndreas Herrmann 	for (i = 0; i < mapping->nr_bitmaps; i++)
22294d852ef8SAndreas Herrmann 		kfree(mapping->bitmaps[i]);
22304d852ef8SAndreas Herrmann 	kfree(mapping->bitmaps);
22314ce63fcdSMarek Szyprowski 	kfree(mapping);
22324ce63fcdSMarek Szyprowski }
22334ce63fcdSMarek Szyprowski 
22344d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
22354d852ef8SAndreas Herrmann {
22364d852ef8SAndreas Herrmann 	int next_bitmap;
22374d852ef8SAndreas Herrmann 
2238462859aaSMarek Szyprowski 	if (mapping->nr_bitmaps >= mapping->extensions)
22394d852ef8SAndreas Herrmann 		return -EINVAL;
22404d852ef8SAndreas Herrmann 
22414d852ef8SAndreas Herrmann 	next_bitmap = mapping->nr_bitmaps;
22424d852ef8SAndreas Herrmann 	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
22434d852ef8SAndreas Herrmann 						GFP_ATOMIC);
22444d852ef8SAndreas Herrmann 	if (!mapping->bitmaps[next_bitmap])
22454d852ef8SAndreas Herrmann 		return -ENOMEM;
22464d852ef8SAndreas Herrmann 
22474d852ef8SAndreas Herrmann 	mapping->nr_bitmaps++;
22484d852ef8SAndreas Herrmann 
22494d852ef8SAndreas Herrmann 	return 0;
22504d852ef8SAndreas Herrmann }
22514d852ef8SAndreas Herrmann 
22524ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
22534ce63fcdSMarek Szyprowski {
22544ce63fcdSMarek Szyprowski 	if (mapping)
22554ce63fcdSMarek Szyprowski 		kref_put(&mapping->kref, release_iommu_mapping);
22564ce63fcdSMarek Szyprowski }
225718177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
22584ce63fcdSMarek Szyprowski 
2259eab8d653SLaurent Pinchart static int __arm_iommu_attach_device(struct device *dev,
22604ce63fcdSMarek Szyprowski 				     struct dma_iommu_mapping *mapping)
22614ce63fcdSMarek Szyprowski {
22624ce63fcdSMarek Szyprowski 	int err;
22634ce63fcdSMarek Szyprowski 
22644ce63fcdSMarek Szyprowski 	err = iommu_attach_device(mapping->domain, dev);
22654ce63fcdSMarek Szyprowski 	if (err)
22664ce63fcdSMarek Szyprowski 		return err;
22674ce63fcdSMarek Szyprowski 
22684ce63fcdSMarek Szyprowski 	kref_get(&mapping->kref);
226989cfdb19SWill Deacon 	to_dma_iommu_mapping(dev) = mapping;
22704ce63fcdSMarek Szyprowski 
227175c59716SHiroshi Doyu 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
22724ce63fcdSMarek Szyprowski 	return 0;
22734ce63fcdSMarek Szyprowski }
22744ce63fcdSMarek Szyprowski 
22756fe36758SHiroshi Doyu /**
2276eab8d653SLaurent Pinchart  * arm_iommu_attach_device
22776fe36758SHiroshi Doyu  * @dev: valid struct device pointer
2278eab8d653SLaurent Pinchart  * @mapping: io address space mapping structure (returned from
2279eab8d653SLaurent Pinchart  *	arm_iommu_create_mapping)
22806fe36758SHiroshi Doyu  *
2281eab8d653SLaurent Pinchart  * Attaches the specified io address space mapping to the provided device.
2282eab8d653SLaurent Pinchart  * This replaces the dma operations (dma_map_ops pointer) with the
2283eab8d653SLaurent Pinchart  * IOMMU-aware version.
2284eab8d653SLaurent Pinchart  *
2285eab8d653SLaurent Pinchart  * More than one client might be attached to the same io address space
2286eab8d653SLaurent Pinchart  * mapping.
22876fe36758SHiroshi Doyu  */
2288eab8d653SLaurent Pinchart int arm_iommu_attach_device(struct device *dev,
2289eab8d653SLaurent Pinchart 			    struct dma_iommu_mapping *mapping)
2290eab8d653SLaurent Pinchart {
2291eab8d653SLaurent Pinchart 	int err;
2292eab8d653SLaurent Pinchart 
2293eab8d653SLaurent Pinchart 	err = __arm_iommu_attach_device(dev, mapping);
2294eab8d653SLaurent Pinchart 	if (err)
2295eab8d653SLaurent Pinchart 		return err;
2296eab8d653SLaurent Pinchart 
2297eab8d653SLaurent Pinchart 	set_dma_ops(dev, &iommu_ops);
2298eab8d653SLaurent Pinchart 	return 0;
2299eab8d653SLaurent Pinchart }
2300eab8d653SLaurent Pinchart EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
2301eab8d653SLaurent Pinchart 
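/*
 * Usage sketch (editor's addition), continuing the hypothetical mapping
 * created above: a successful attach installs the IOMMU-aware dma_map_ops
 * for the device, so all subsequent dma_map_*() calls go through the IOMMU.
 *
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err) {
 *		arm_iommu_release_mapping(mapping);
 *		return err;
 *	}
 */
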
2302d3e01c51SSricharan R /**
2303d3e01c51SSricharan R  * arm_iommu_detach_device
2304d3e01c51SSricharan R  * @dev: valid struct device pointer
2305d3e01c51SSricharan R  *
2306d3e01c51SSricharan R  * Detaches the provided device from a previously attached mapping.
2307d3e01c51SSricharan R  * This clears the dma operations (dma_map_ops pointer).
2308d3e01c51SSricharan R  */
2309d3e01c51SSricharan R void arm_iommu_detach_device(struct device *dev)
23106fe36758SHiroshi Doyu {
23116fe36758SHiroshi Doyu 	struct dma_iommu_mapping *mapping;
23126fe36758SHiroshi Doyu 
23136fe36758SHiroshi Doyu 	mapping = to_dma_iommu_mapping(dev);
23146fe36758SHiroshi Doyu 	if (!mapping) {
23156fe36758SHiroshi Doyu 		dev_warn(dev, "Not attached\n");
23166fe36758SHiroshi Doyu 		return;
23176fe36758SHiroshi Doyu 	}
23186fe36758SHiroshi Doyu 
23196fe36758SHiroshi Doyu 	iommu_detach_device(mapping->domain, dev);
23206fe36758SHiroshi Doyu 	kref_put(&mapping->kref, release_iommu_mapping);
232189cfdb19SWill Deacon 	to_dma_iommu_mapping(dev) = NULL;
2322d3e01c51SSricharan R 	set_dma_ops(dev, NULL);
23236fe36758SHiroshi Doyu 
23246fe36758SHiroshi Doyu 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
23256fe36758SHiroshi Doyu }
232618177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
23276fe36758SHiroshi Doyu 
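/*
 * Usage sketch (editor's addition): the matching teardown path first drops
 * the device's reference on the mapping, then the creator's own reference.
 *
 *	arm_iommu_detach_device(dev);
 *	arm_iommu_release_mapping(mapping);
 */
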
23285299709dSBart Van Assche static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
23294bb25789SWill Deacon {
23304bb25789SWill Deacon 	return coherent ? &iommu_coherent_ops : &iommu_ops;
23314bb25789SWill Deacon }
23324bb25789SWill Deacon 
23334bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
233453c92d79SRobin Murphy 				    const struct iommu_ops *iommu)
23354bb25789SWill Deacon {
23364bb25789SWill Deacon 	struct dma_iommu_mapping *mapping;
23374bb25789SWill Deacon 
23384bb25789SWill Deacon 	if (!iommu)
23394bb25789SWill Deacon 		return false;
23404bb25789SWill Deacon 
23414bb25789SWill Deacon 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
23424bb25789SWill Deacon 	if (IS_ERR(mapping)) {
23434bb25789SWill Deacon 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
23444bb25789SWill Deacon 				size, dev_name(dev));
23454bb25789SWill Deacon 		return false;
23464bb25789SWill Deacon 	}
23474bb25789SWill Deacon 
2348eab8d653SLaurent Pinchart 	if (__arm_iommu_attach_device(dev, mapping)) {
23494bb25789SWill Deacon 		pr_warn("Failed to attach device %s to IOMMU mapping\n",
23504bb25789SWill Deacon 				dev_name(dev));
23514bb25789SWill Deacon 		arm_iommu_release_mapping(mapping);
23524bb25789SWill Deacon 		return false;
23534bb25789SWill Deacon 	}
23544bb25789SWill Deacon 
23554bb25789SWill Deacon 	return true;
23564bb25789SWill Deacon }
23574bb25789SWill Deacon 
23584bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev)
23594bb25789SWill Deacon {
236089cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
23614bb25789SWill Deacon 
2362c2273a18SWill Deacon 	if (!mapping)
2363c2273a18SWill Deacon 		return;
2364c2273a18SWill Deacon 
2365d3e01c51SSricharan R 	arm_iommu_detach_device(dev);
23664bb25789SWill Deacon 	arm_iommu_release_mapping(mapping);
23674bb25789SWill Deacon }
23684bb25789SWill Deacon 
23694bb25789SWill Deacon #else
23704bb25789SWill Deacon 
23714bb25789SWill Deacon static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
237253c92d79SRobin Murphy 				    const struct iommu_ops *iommu)
23734bb25789SWill Deacon {
23744bb25789SWill Deacon 	return false;
23754bb25789SWill Deacon }
23764bb25789SWill Deacon 
23774bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { }
23784bb25789SWill Deacon 
23794bb25789SWill Deacon #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
23804bb25789SWill Deacon 
23814bb25789SWill Deacon #endif	/* CONFIG_ARM_DMA_USE_IOMMU */
23824bb25789SWill Deacon 
23835299709dSBart Van Assche static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
23844bb25789SWill Deacon {
23854bb25789SWill Deacon 	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
23864bb25789SWill Deacon }
23874bb25789SWill Deacon 
23884bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
238953c92d79SRobin Murphy 			const struct iommu_ops *iommu, bool coherent)
23904bb25789SWill Deacon {
23915299709dSBart Van Assche 	const struct dma_map_ops *dma_ops;
23924bb25789SWill Deacon 
23936f51ee70SLinus Torvalds 	dev->archdata.dma_coherent = coherent;
239426b37b94SLaurent Pinchart 
239526b37b94SLaurent Pinchart 	/*
239626b37b94SLaurent Pinchart 	 * Don't override the dma_ops if they have already been set. Ideally
239726b37b94SLaurent Pinchart 	 * this should be the only location where dma_ops are set, remove this
239826b37b94SLaurent Pinchart 	 * check when all other callers of set_dma_ops will have disappeared.
239926b37b94SLaurent Pinchart 	 */
240026b37b94SLaurent Pinchart 	if (dev->dma_ops)
240126b37b94SLaurent Pinchart 		return;
240226b37b94SLaurent Pinchart 
24034bb25789SWill Deacon 	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
24044bb25789SWill Deacon 		dma_ops = arm_get_iommu_dma_map_ops(coherent);
24054bb25789SWill Deacon 	else
24064bb25789SWill Deacon 		dma_ops = arm_get_dma_map_ops(coherent);
24074bb25789SWill Deacon 
24084bb25789SWill Deacon 	set_dma_ops(dev, dma_ops);
2409e0586326SStefano Stabellini 
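	/*
	 * When running as the Xen initial domain, the DMA ops chosen above
	 * are preserved in dev_dma_ops and Xen's own DMA ops are installed
	 * in their place.
	 */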
2410e0586326SStefano Stabellini #ifdef CONFIG_XEN
2411e0586326SStefano Stabellini 	if (xen_initial_domain()) {
2412e0586326SStefano Stabellini 		dev->archdata.dev_dma_ops = dev->dma_ops;
2413e0586326SStefano Stabellini 		dev->dma_ops = xen_dma_ops;
2414e0586326SStefano Stabellini 	}
2415e0586326SStefano Stabellini #endif
2416a93a121aSLaurent Pinchart 	dev->archdata.dma_ops_setup = true;
24174bb25789SWill Deacon }
24184bb25789SWill Deacon 
24194bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev)
24204bb25789SWill Deacon {
2421a93a121aSLaurent Pinchart 	if (!dev->archdata.dma_ops_setup)
2422a93a121aSLaurent Pinchart 		return;
2423a93a121aSLaurent Pinchart 
24244bb25789SWill Deacon 	arm_teardown_iommu_dma_ops(dev);
24254bb25789SWill Deacon }
2426