xref: /openbmc/linux/arch/arm/mm/dma-mapping.c (revision 8b5989f3)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
20ddbccd1SRussell King /*
30ddbccd1SRussell King  *  linux/arch/arm/mm/dma-mapping.c
40ddbccd1SRussell King  *
50ddbccd1SRussell King  *  Copyright (C) 2000-2004 Russell King
60ddbccd1SRussell King  *
70ddbccd1SRussell King  *  DMA uncached mapping support.
80ddbccd1SRussell King  */
90ddbccd1SRussell King #include <linux/module.h>
100ddbccd1SRussell King #include <linux/mm.h>
1136d0fd21SLaura Abbott #include <linux/genalloc.h>
125a0e3ad6STejun Heo #include <linux/gfp.h>
130ddbccd1SRussell King #include <linux/errno.h>
140ddbccd1SRussell King #include <linux/list.h>
150ddbccd1SRussell King #include <linux/init.h>
160ddbccd1SRussell King #include <linux/device.h>
17249baa54SChristoph Hellwig #include <linux/dma-direct.h>
180a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
1939af22a7SNicolas Pitre #include <linux/highmem.h>
20c7909509SMarek Szyprowski #include <linux/memblock.h>
2199d1717dSJon Medhurst #include <linux/slab.h>
224ce63fcdSMarek Szyprowski #include <linux/iommu.h>
23e9da6e99SMarek Szyprowski #include <linux/io.h>
244ce63fcdSMarek Szyprowski #include <linux/vmalloc.h>
25158e8bfeSAlessandro Rubini #include <linux/sizes.h>
26a254129eSJoonsoo Kim #include <linux/cma.h>
270ddbccd1SRussell King 
28a9ff6961SLinus Walleij #include <asm/page.h>
2943377453SNicolas Pitre #include <asm/highmem.h>
300ddbccd1SRussell King #include <asm/cacheflush.h>
310ddbccd1SRussell King #include <asm/tlbflush.h>
3299d1717dSJon Medhurst #include <asm/mach/arch.h>
334ce63fcdSMarek Szyprowski #include <asm/dma-iommu.h>
34c7909509SMarek Szyprowski #include <asm/mach/map.h>
35c7909509SMarek Szyprowski #include <asm/system_info.h>
369bf22421SOleksandr Tyshchenko #include <asm/xen/xen-ops.h>
370ddbccd1SRussell King 
381234e3fdSRussell King #include "dma.h"
39022ae537SRussell King #include "mm.h"
40022ae537SRussell King 
41b4268676SRabin Vincent struct arm_dma_alloc_args {
42b4268676SRabin Vincent 	struct device *dev;
43b4268676SRabin Vincent 	size_t size;
44b4268676SRabin Vincent 	gfp_t gfp;
45b4268676SRabin Vincent 	pgprot_t prot;
46b4268676SRabin Vincent 	const void *caller;
47b4268676SRabin Vincent 	bool want_vaddr;
48f1270896SGregory CLEMENT 	int coherent_flag;
49b4268676SRabin Vincent };
50b4268676SRabin Vincent 
51b4268676SRabin Vincent struct arm_dma_free_args {
52b4268676SRabin Vincent 	struct device *dev;
53b4268676SRabin Vincent 	size_t size;
54b4268676SRabin Vincent 	void *cpu_addr;
55b4268676SRabin Vincent 	struct page *page;
56b4268676SRabin Vincent 	bool want_vaddr;
57b4268676SRabin Vincent };
58b4268676SRabin Vincent 
59f1270896SGregory CLEMENT #define NORMAL	    0
60f1270896SGregory CLEMENT #define COHERENT    1
61f1270896SGregory CLEMENT 
62b4268676SRabin Vincent struct arm_dma_allocator {
63b4268676SRabin Vincent 	void *(*alloc)(struct arm_dma_alloc_args *args,
64b4268676SRabin Vincent 		       struct page **ret_page);
65b4268676SRabin Vincent 	void (*free)(struct arm_dma_free_args *args);
66b4268676SRabin Vincent };
67b4268676SRabin Vincent 
6819e6e5e5SRabin Vincent struct arm_dma_buffer {
6919e6e5e5SRabin Vincent 	struct list_head list;
7019e6e5e5SRabin Vincent 	void *virt;
71b4268676SRabin Vincent 	struct arm_dma_allocator *allocator;
7219e6e5e5SRabin Vincent };
7319e6e5e5SRabin Vincent 
7419e6e5e5SRabin Vincent static LIST_HEAD(arm_dma_bufs);
7519e6e5e5SRabin Vincent static DEFINE_SPINLOCK(arm_dma_bufs_lock);
7619e6e5e5SRabin Vincent 
7719e6e5e5SRabin Vincent static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
7819e6e5e5SRabin Vincent {
7919e6e5e5SRabin Vincent 	struct arm_dma_buffer *buf, *found = NULL;
8019e6e5e5SRabin Vincent 	unsigned long flags;
8119e6e5e5SRabin Vincent 
8219e6e5e5SRabin Vincent 	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
8319e6e5e5SRabin Vincent 	list_for_each_entry(buf, &arm_dma_bufs, list) {
8419e6e5e5SRabin Vincent 		if (buf->virt == virt) {
8519e6e5e5SRabin Vincent 			list_del(&buf->list);
8619e6e5e5SRabin Vincent 			found = buf;
8719e6e5e5SRabin Vincent 			break;
8819e6e5e5SRabin Vincent 		}
8919e6e5e5SRabin Vincent 	}
9019e6e5e5SRabin Vincent 	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
9119e6e5e5SRabin Vincent 	return found;
9219e6e5e5SRabin Vincent }
9319e6e5e5SRabin Vincent 
9415237e1fSMarek Szyprowski /*
9515237e1fSMarek Szyprowski  * The DMA API is built upon the notion of "buffer ownership".  A buffer
9615237e1fSMarek Szyprowski  * is either exclusively owned by the CPU (and therefore may be accessed
9715237e1fSMarek Szyprowski  * by it) or exclusively owned by the DMA device.  These helper functions
9815237e1fSMarek Szyprowski  * represent the transitions between these two ownership states.
9915237e1fSMarek Szyprowski  *
10015237e1fSMarek Szyprowski  * Note, however, that on later ARMs, this notion does not work due to
10115237e1fSMarek Szyprowski  * speculative prefetches.  We model our approach on the assumption that
10215237e1fSMarek Szyprowski  * the CPU does do speculative prefetches, which means we clean caches
10315237e1fSMarek Szyprowski  * before transfers and delay cache invalidation until transfer completion.
10415237e1fSMarek Szyprowski  *
10515237e1fSMarek Szyprowski  */
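/*
 * Illustrative sketch (not part of this file): seen from a driver, the
 * ownership hand-off described above is what the streaming DMA API
 * expresses.  The buffer, length and direction are made-up examples.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	// the device now owns the buffer; the CPU must not touch it
 *	... start the transfer and wait for it to complete ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *	// ownership is back with the CPU, which may now read the data
 */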
106dd37e940SRob Herring 
107f1270896SGregory CLEMENT static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
108c7909509SMarek Szyprowski {
109c7909509SMarek Szyprowski 	/*
110c7909509SMarek Szyprowski 	 * Ensure that the allocated pages are zeroed, and that any data
111c7909509SMarek Szyprowski 	 * lurking in the kernel direct-mapped region is invalidated.
112c7909509SMarek Szyprowski 	 */
1139848e48fSMarek Szyprowski 	if (PageHighMem(page)) {
1149848e48fSMarek Szyprowski 		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
1159848e48fSMarek Szyprowski 		phys_addr_t end = base + size;
1169848e48fSMarek Szyprowski 		while (size > 0) {
1179848e48fSMarek Szyprowski 			void *ptr = kmap_atomic(page);
1189848e48fSMarek Szyprowski 			memset(ptr, 0, PAGE_SIZE);
119f1270896SGregory CLEMENT 			if (coherent_flag != COHERENT)
1209848e48fSMarek Szyprowski 				dmac_flush_range(ptr, ptr + PAGE_SIZE);
1219848e48fSMarek Szyprowski 			kunmap_atomic(ptr);
1229848e48fSMarek Szyprowski 			page++;
1239848e48fSMarek Szyprowski 			size -= PAGE_SIZE;
1249848e48fSMarek Szyprowski 		}
125f1270896SGregory CLEMENT 		if (coherent_flag != COHERENT)
1269848e48fSMarek Szyprowski 			outer_flush_range(base, end);
1279848e48fSMarek Szyprowski 	} else {
1289848e48fSMarek Szyprowski 		void *ptr = page_address(page);
129c7909509SMarek Szyprowski 		memset(ptr, 0, size);
130f1270896SGregory CLEMENT 		if (coherent_flag != COHERENT) {
131c7909509SMarek Szyprowski 			dmac_flush_range(ptr, ptr + size);
132c7909509SMarek Szyprowski 			outer_flush_range(__pa(ptr), __pa(ptr) + size);
133c7909509SMarek Szyprowski 		}
1344ce63fcdSMarek Szyprowski 	}
135f1270896SGregory CLEMENT }
136c7909509SMarek Szyprowski 
1377a9a32a9SRussell King /*
1387a9a32a9SRussell King  * Allocate a DMA buffer for 'dev' of size 'size' using the
1397a9a32a9SRussell King  * specified gfp mask.  Note that 'size' must be page aligned.
1407a9a32a9SRussell King  */
141f1270896SGregory CLEMENT static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
142f1270896SGregory CLEMENT 				       gfp_t gfp, int coherent_flag)
1437a9a32a9SRussell King {
1447a9a32a9SRussell King 	unsigned long order = get_order(size);
1457a9a32a9SRussell King 	struct page *page, *p, *e;
1467a9a32a9SRussell King 
1477a9a32a9SRussell King 	page = alloc_pages(gfp, order);
1487a9a32a9SRussell King 	if (!page)
1497a9a32a9SRussell King 		return NULL;
1507a9a32a9SRussell King 
1517a9a32a9SRussell King 	/*
1527a9a32a9SRussell King 	 * Now split the huge page and free the excess pages
1537a9a32a9SRussell King 	 */
1547a9a32a9SRussell King 	split_page(page, order);
1557a9a32a9SRussell King 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
1567a9a32a9SRussell King 		__free_page(p);
1577a9a32a9SRussell King 
158f1270896SGregory CLEMENT 	__dma_clear_buffer(page, size, coherent_flag);
1597a9a32a9SRussell King 
1607a9a32a9SRussell King 	return page;
1617a9a32a9SRussell King }
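/*
 * Worked example (assuming 4 KiB pages; the size is made up): a 20 KiB
 * request gives get_order() == 3, so alloc_pages() returns a 32 KiB,
 * 8-page block.  split_page() makes the 8 pages independent and the
 * loop above frees the trailing 3, leaving exactly the 5 pages (20 KiB)
 * that were requested.
 */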
1627a9a32a9SRussell King 
1637a9a32a9SRussell King /*
1647a9a32a9SRussell King  * Free a DMA buffer.  'size' must be page aligned.
1657a9a32a9SRussell King  */
1667a9a32a9SRussell King static void __dma_free_buffer(struct page *page, size_t size)
1677a9a32a9SRussell King {
1687a9a32a9SRussell King 	struct page *e = page + (size >> PAGE_SHIFT);
1697a9a32a9SRussell King 
1707a9a32a9SRussell King 	while (page < e) {
1717a9a32a9SRussell King 		__free_page(page);
1727a9a32a9SRussell King 		page++;
1737a9a32a9SRussell King 	}
1747a9a32a9SRussell King }
1757a9a32a9SRussell King 
176c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size,
1779848e48fSMarek Szyprowski 				     pgprot_t prot, struct page **ret_page,
178f1270896SGregory CLEMENT 				     const void *caller, bool want_vaddr,
179712c604dSLucas Stach 				     int coherent_flag, gfp_t gfp);
180c7909509SMarek Szyprowski 
181e9da6e99SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
182e9da6e99SMarek Szyprowski 				 pgprot_t prot, struct page **ret_page,
1836e8266e3SCarlo Caione 				 const void *caller, bool want_vaddr);
184e9da6e99SMarek Szyprowski 
1856e5267aaSMarek Szyprowski #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
186b337e1c4SVladimir Murzin static struct gen_pool *atomic_pool __ro_after_init;
1876e5267aaSMarek Szyprowski 
188b337e1c4SVladimir Murzin static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
189c7909509SMarek Szyprowski 
190c7909509SMarek Szyprowski static int __init early_coherent_pool(char *p)
191c7909509SMarek Szyprowski {
19236d0fd21SLaura Abbott 	atomic_pool_size = memparse(p, &p);
193c7909509SMarek Szyprowski 	return 0;
194c7909509SMarek Szyprowski }
195c7909509SMarek Szyprowski early_param("coherent_pool", early_coherent_pool);
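/*
 * Example (hypothetical command line): booting with "coherent_pool=1M"
 * lets memparse() above set atomic_pool_size to 1 MiB instead of the
 * 256 KiB default (DEFAULT_DMA_COHERENT_POOL_SIZE).
 */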
196c7909509SMarek Szyprowski 
197c7909509SMarek Szyprowski /*
198c7909509SMarek Szyprowski  * Initialise the coherent pool for atomic allocations.
199c7909509SMarek Szyprowski  */
200e9da6e99SMarek Szyprowski static int __init atomic_pool_init(void)
201c7909509SMarek Szyprowski {
20271b55663SRussell King 	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
2039d1400cfSMarek Szyprowski 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
204c7909509SMarek Szyprowski 	struct page *page;
205c7909509SMarek Szyprowski 	void *ptr;
206c7909509SMarek Szyprowski 
20736d0fd21SLaura Abbott 	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
20836d0fd21SLaura Abbott 	if (!atomic_pool)
20936d0fd21SLaura Abbott 		goto out;
210f1270896SGregory CLEMENT 	/*
211f1270896SGregory CLEMENT 	 * The atomic pool is only used for non-coherent allocations
212f1270896SGregory CLEMENT 	 * so we must pass NORMAL for coherent_flag.
213f1270896SGregory CLEMENT 	 */
214e464ef16SGioh Kim 	if (dev_get_cma_area(NULL))
21536d0fd21SLaura Abbott 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
216712c604dSLucas Stach 				      &page, atomic_pool_init, true, NORMAL,
217712c604dSLucas Stach 				      GFP_KERNEL);
218e9da6e99SMarek Szyprowski 	else
21936d0fd21SLaura Abbott 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
2206e8266e3SCarlo Caione 					   &page, atomic_pool_init, true);
221c7909509SMarek Szyprowski 	if (ptr) {
22236d0fd21SLaura Abbott 		int ret;
2236b3fe472SHiroshi Doyu 
22436d0fd21SLaura Abbott 		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
22536d0fd21SLaura Abbott 					page_to_phys(page),
22636d0fd21SLaura Abbott 					atomic_pool_size, -1);
22736d0fd21SLaura Abbott 		if (ret)
22836d0fd21SLaura Abbott 			goto destroy_genpool;
2296b3fe472SHiroshi Doyu 
23036d0fd21SLaura Abbott 		gen_pool_set_algo(atomic_pool,
23136d0fd21SLaura Abbott 				gen_pool_first_fit_order_align,
232acb62448SVladimir Murzin 				NULL);
233bf31c5e0SFabio Estevam 		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
23436d0fd21SLaura Abbott 		       atomic_pool_size / 1024);
235c7909509SMarek Szyprowski 		return 0;
236c7909509SMarek Szyprowski 	}
237ec10665cSSachin Kamat 
23836d0fd21SLaura Abbott destroy_genpool:
23936d0fd21SLaura Abbott 	gen_pool_destroy(atomic_pool);
24036d0fd21SLaura Abbott 	atomic_pool = NULL;
24136d0fd21SLaura Abbott out:
242bf31c5e0SFabio Estevam 	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
24336d0fd21SLaura Abbott 	       atomic_pool_size / 1024);
244c7909509SMarek Szyprowski 	return -ENOMEM;
245c7909509SMarek Szyprowski }
246c7909509SMarek Szyprowski /*
247c7909509SMarek Szyprowski  * CMA is activated by core_initcall, so we must be called after it.
248c7909509SMarek Szyprowski  */
249e9da6e99SMarek Szyprowski postcore_initcall(atomic_pool_init);
250c7909509SMarek Szyprowski 
251229a08a4SKees Cook #ifdef CONFIG_CMA_AREAS
252c7909509SMarek Szyprowski struct dma_contig_early_reserve {
253c7909509SMarek Szyprowski 	phys_addr_t base;
254c7909509SMarek Szyprowski 	unsigned long size;
255c7909509SMarek Szyprowski };
256c7909509SMarek Szyprowski 
257c7909509SMarek Szyprowski static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
258c7909509SMarek Szyprowski 
259c7909509SMarek Szyprowski static int dma_mmu_remap_num __initdata;
260c7909509SMarek Szyprowski 
261a9f8f2b2SArnd Bergmann #ifdef CONFIG_DMA_CMA
262c7909509SMarek Szyprowski void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
263c7909509SMarek Szyprowski {
264c7909509SMarek Szyprowski 	dma_mmu_remap[dma_mmu_remap_num].base = base;
265c7909509SMarek Szyprowski 	dma_mmu_remap[dma_mmu_remap_num].size = size;
266c7909509SMarek Szyprowski 	dma_mmu_remap_num++;
267c7909509SMarek Szyprowski }
268a9f8f2b2SArnd Bergmann #endif
269c7909509SMarek Szyprowski 
270c7909509SMarek Szyprowski void __init dma_contiguous_remap(void)
271c7909509SMarek Szyprowski {
272c7909509SMarek Szyprowski 	int i;
273c7909509SMarek Szyprowski 	for (i = 0; i < dma_mmu_remap_num; i++) {
274c7909509SMarek Szyprowski 		phys_addr_t start = dma_mmu_remap[i].base;
275c7909509SMarek Szyprowski 		phys_addr_t end = start + dma_mmu_remap[i].size;
276c7909509SMarek Szyprowski 		struct map_desc map;
277c7909509SMarek Szyprowski 		unsigned long addr;
278c7909509SMarek Szyprowski 
279c7909509SMarek Szyprowski 		if (end > arm_lowmem_limit)
280c7909509SMarek Szyprowski 			end = arm_lowmem_limit;
281c7909509SMarek Szyprowski 		if (start >= end)
28239f78e70SChris Brand 			continue;
283c7909509SMarek Szyprowski 
284c7909509SMarek Szyprowski 		map.pfn = __phys_to_pfn(start);
285c7909509SMarek Szyprowski 		map.virtual = __phys_to_virt(start);
286c7909509SMarek Szyprowski 		map.length = end - start;
287c7909509SMarek Szyprowski 		map.type = MT_MEMORY_DMA_READY;
288c7909509SMarek Szyprowski 
289c7909509SMarek Szyprowski 		/*
2906b076991SRussell King 		 * Clear previous low-memory mapping to ensure that the
2916b076991SRussell King 		 * TLB does not see any conflicting entries, then flush
2926b076991SRussell King 		 * the TLB of the old entries before creating new mappings.
2936b076991SRussell King 		 *
2946b076991SRussell King 		 * This ensures that any speculatively loaded TLB entries
2956b076991SRussell King 		 * (even though they may be rare) can not cause any problems,
2966b076991SRussell King 		 * and ensures that this code is architecturally compliant.
297c7909509SMarek Szyprowski 		 */
298c7909509SMarek Szyprowski 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
29961f6c7a4SVitaly Andrianov 		     addr += PMD_SIZE)
300c7909509SMarek Szyprowski 			pmd_clear(pmd_off_k(addr));
301c7909509SMarek Szyprowski 
3026b076991SRussell King 		flush_tlb_kernel_range(__phys_to_virt(start),
3036b076991SRussell King 				       __phys_to_virt(end));
3046b076991SRussell King 
305c7909509SMarek Szyprowski 		iotable_init(&map, 1);
306c7909509SMarek Szyprowski 	}
307c7909509SMarek Szyprowski }
308229a08a4SKees Cook #endif
309c7909509SMarek Szyprowski 
3108b1e0f81SAnshuman Khandual static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
311c7909509SMarek Szyprowski {
3128770b9e5SLinus Walleij 	struct page *page = virt_to_page((void *)addr);
313c7909509SMarek Szyprowski 	pgprot_t prot = *(pgprot_t *)data;
314c7909509SMarek Szyprowski 
315c7909509SMarek Szyprowski 	set_pte_ext(pte, mk_pte(page, prot), 0);
316c7909509SMarek Szyprowski 	return 0;
317c7909509SMarek Szyprowski }
318c7909509SMarek Szyprowski 
319c7909509SMarek Szyprowski static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
320c7909509SMarek Szyprowski {
321c7909509SMarek Szyprowski 	unsigned long start = (unsigned long) page_address(page);
322c7909509SMarek Szyprowski 	unsigned end = start + size;
323c7909509SMarek Szyprowski 
324c7909509SMarek Szyprowski 	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
325c7909509SMarek Szyprowski 	flush_tlb_kernel_range(start, end);
326c7909509SMarek Szyprowski }
327c7909509SMarek Szyprowski 
328c7909509SMarek Szyprowski static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
329c7909509SMarek Szyprowski 				 pgprot_t prot, struct page **ret_page,
3306e8266e3SCarlo Caione 				 const void *caller, bool want_vaddr)
331c7909509SMarek Szyprowski {
332c7909509SMarek Szyprowski 	struct page *page;
3336e8266e3SCarlo Caione 	void *ptr = NULL;
334f1270896SGregory CLEMENT 	/*
335f1270896SGregory CLEMENT 	 * __alloc_remap_buffer is only called when the device is
336f1270896SGregory CLEMENT 	 * non-coherent
337f1270896SGregory CLEMENT 	 */
338f1270896SGregory CLEMENT 	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
339c7909509SMarek Szyprowski 	if (!page)
340c7909509SMarek Szyprowski 		return NULL;
3416e8266e3SCarlo Caione 	if (!want_vaddr)
3426e8266e3SCarlo Caione 		goto out;
343c7909509SMarek Szyprowski 
34478406ff5SChristoph Hellwig 	ptr = dma_common_contiguous_remap(page, size, prot, caller);
345c7909509SMarek Szyprowski 	if (!ptr) {
346c7909509SMarek Szyprowski 		__dma_free_buffer(page, size);
347c7909509SMarek Szyprowski 		return NULL;
348c7909509SMarek Szyprowski 	}
349c7909509SMarek Szyprowski 
3506e8266e3SCarlo Caione  out:
351c7909509SMarek Szyprowski 	*ret_page = page;
352c7909509SMarek Szyprowski 	return ptr;
353c7909509SMarek Szyprowski }
354c7909509SMarek Szyprowski 
355e9da6e99SMarek Szyprowski static void *__alloc_from_pool(size_t size, struct page **ret_page)
356c7909509SMarek Szyprowski {
35736d0fd21SLaura Abbott 	unsigned long val;
358e9da6e99SMarek Szyprowski 	void *ptr = NULL;
359c7909509SMarek Szyprowski 
36036d0fd21SLaura Abbott 	if (!atomic_pool) {
361e9da6e99SMarek Szyprowski 		WARN(1, "coherent pool not initialised!\n");
362c7909509SMarek Szyprowski 		return NULL;
363c7909509SMarek Szyprowski 	}
364c7909509SMarek Szyprowski 
36536d0fd21SLaura Abbott 	val = gen_pool_alloc(atomic_pool, size);
36636d0fd21SLaura Abbott 	if (val) {
36736d0fd21SLaura Abbott 		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
368e9da6e99SMarek Szyprowski 
36936d0fd21SLaura Abbott 		*ret_page = phys_to_page(phys);
37036d0fd21SLaura Abbott 		ptr = (void *)val;
371e9da6e99SMarek Szyprowski 	}
372e9da6e99SMarek Szyprowski 
373c7909509SMarek Szyprowski 	return ptr;
374c7909509SMarek Szyprowski }
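/*
 * When __dma_alloc() below is entered with a gfp mask that does not
 * allow blocking (and the device is not coherent), it picks
 * pool_allocator and ends up here: the buffer is carved out of the
 * pre-mapped atomic pool with gen_pool_alloc() rather than creating a
 * new kernel mapping in atomic context.
 */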
375c7909509SMarek Szyprowski 
37621d0a759SHiroshi Doyu static bool __in_atomic_pool(void *start, size_t size)
37721d0a759SHiroshi Doyu {
378964975acSHuang Shijie 	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
37921d0a759SHiroshi Doyu }
38021d0a759SHiroshi Doyu 
381e9da6e99SMarek Szyprowski static int __free_from_pool(void *start, size_t size)
382c7909509SMarek Szyprowski {
38321d0a759SHiroshi Doyu 	if (!__in_atomic_pool(start, size))
384c7909509SMarek Szyprowski 		return 0;
385c7909509SMarek Szyprowski 
38636d0fd21SLaura Abbott 	gen_pool_free(atomic_pool, (unsigned long)start, size);
387e9da6e99SMarek Szyprowski 
388c7909509SMarek Szyprowski 	return 1;
389c7909509SMarek Szyprowski }
390c7909509SMarek Szyprowski 
391c7909509SMarek Szyprowski static void *__alloc_from_contiguous(struct device *dev, size_t size,
3929848e48fSMarek Szyprowski 				     pgprot_t prot, struct page **ret_page,
393f1270896SGregory CLEMENT 				     const void *caller, bool want_vaddr,
394712c604dSLucas Stach 				     int coherent_flag, gfp_t gfp)
395c7909509SMarek Szyprowski {
396c7909509SMarek Szyprowski 	unsigned long order = get_order(size);
397c7909509SMarek Szyprowski 	size_t count = size >> PAGE_SHIFT;
398c7909509SMarek Szyprowski 	struct page *page;
3996e8266e3SCarlo Caione 	void *ptr = NULL;
400c7909509SMarek Szyprowski 
401d834c5abSMarek Szyprowski 	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
402c7909509SMarek Szyprowski 	if (!page)
403c7909509SMarek Szyprowski 		return NULL;
404c7909509SMarek Szyprowski 
405f1270896SGregory CLEMENT 	__dma_clear_buffer(page, size, coherent_flag);
406c7909509SMarek Szyprowski 
4076e8266e3SCarlo Caione 	if (!want_vaddr)
4086e8266e3SCarlo Caione 		goto out;
4096e8266e3SCarlo Caione 
4109848e48fSMarek Szyprowski 	if (PageHighMem(page)) {
41178406ff5SChristoph Hellwig 		ptr = dma_common_contiguous_remap(page, size, prot, caller);
4129848e48fSMarek Szyprowski 		if (!ptr) {
4139848e48fSMarek Szyprowski 			dma_release_from_contiguous(dev, page, count);
4149848e48fSMarek Szyprowski 			return NULL;
4159848e48fSMarek Szyprowski 		}
4169848e48fSMarek Szyprowski 	} else {
4179848e48fSMarek Szyprowski 		__dma_remap(page, size, prot);
4189848e48fSMarek Szyprowski 		ptr = page_address(page);
4199848e48fSMarek Szyprowski 	}
4206e8266e3SCarlo Caione 
4216e8266e3SCarlo Caione  out:
422c7909509SMarek Szyprowski 	*ret_page = page;
4239848e48fSMarek Szyprowski 	return ptr;
424c7909509SMarek Szyprowski }
425c7909509SMarek Szyprowski 
426c7909509SMarek Szyprowski static void __free_from_contiguous(struct device *dev, struct page *page,
4276e8266e3SCarlo Caione 				   void *cpu_addr, size_t size, bool want_vaddr)
428c7909509SMarek Szyprowski {
4296e8266e3SCarlo Caione 	if (want_vaddr) {
4309848e48fSMarek Szyprowski 		if (PageHighMem(page))
43178406ff5SChristoph Hellwig 			dma_common_free_remap(cpu_addr, size);
4329848e48fSMarek Szyprowski 		else
43371b55663SRussell King 			__dma_remap(page, size, PAGE_KERNEL);
4346e8266e3SCarlo Caione 	}
435c7909509SMarek Szyprowski 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
436c7909509SMarek Szyprowski }
437c7909509SMarek Szyprowski 
43800085f1eSKrzysztof Kozlowski static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
439f99d6034SMarek Szyprowski {
44000085f1eSKrzysztof Kozlowski 	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
441f99d6034SMarek Szyprowski 			pgprot_writecombine(prot) :
442f99d6034SMarek Szyprowski 			pgprot_dmacoherent(prot);
443f99d6034SMarek Szyprowski 	return prot;
444f99d6034SMarek Szyprowski }
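/*
 * Example: a caller passing DMA_ATTR_WRITE_COMBINE gets a
 * write-combining mapping from the helper above; all other callers get
 * the stricter pgprot_dmacoherent() attributes.
 */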
445f99d6034SMarek Szyprowski 
446c7909509SMarek Szyprowski static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
447c7909509SMarek Szyprowski 				   struct page **ret_page)
448ab6494f0SCatalin Marinas {
44904da5694SRussell King 	struct page *page;
450f1270896SGregory CLEMENT 	/* __alloc_simple_buffer is only called when the device is coherent */
451f1270896SGregory CLEMENT 	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
452c7909509SMarek Szyprowski 	if (!page)
453c7909509SMarek Szyprowski 		return NULL;
454c7909509SMarek Szyprowski 
455c7909509SMarek Szyprowski 	*ret_page = page;
456c7909509SMarek Szyprowski 	return page_address(page);
457c7909509SMarek Szyprowski }
458c7909509SMarek Szyprowski 
459b4268676SRabin Vincent static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
460b4268676SRabin Vincent 				    struct page **ret_page)
461b4268676SRabin Vincent {
462b4268676SRabin Vincent 	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
463b4268676SRabin Vincent 				     ret_page);
464b4268676SRabin Vincent }
465c7909509SMarek Szyprowski 
466b4268676SRabin Vincent static void simple_allocator_free(struct arm_dma_free_args *args)
467b4268676SRabin Vincent {
468b4268676SRabin Vincent 	__dma_free_buffer(args->page, args->size);
469b4268676SRabin Vincent }
470b4268676SRabin Vincent 
471b4268676SRabin Vincent static struct arm_dma_allocator simple_allocator = {
472b4268676SRabin Vincent 	.alloc = simple_allocator_alloc,
473b4268676SRabin Vincent 	.free = simple_allocator_free,
474b4268676SRabin Vincent };
475b4268676SRabin Vincent 
476b4268676SRabin Vincent static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
477b4268676SRabin Vincent 				 struct page **ret_page)
478b4268676SRabin Vincent {
479b4268676SRabin Vincent 	return __alloc_from_contiguous(args->dev, args->size, args->prot,
480b4268676SRabin Vincent 				       ret_page, args->caller,
481712c604dSLucas Stach 				       args->want_vaddr, args->coherent_flag,
482712c604dSLucas Stach 				       args->gfp);
483b4268676SRabin Vincent }
484b4268676SRabin Vincent 
485b4268676SRabin Vincent static void cma_allocator_free(struct arm_dma_free_args *args)
486b4268676SRabin Vincent {
487b4268676SRabin Vincent 	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
488b4268676SRabin Vincent 			       args->size, args->want_vaddr);
489b4268676SRabin Vincent }
490b4268676SRabin Vincent 
491b4268676SRabin Vincent static struct arm_dma_allocator cma_allocator = {
492b4268676SRabin Vincent 	.alloc = cma_allocator_alloc,
493b4268676SRabin Vincent 	.free = cma_allocator_free,
494b4268676SRabin Vincent };
495b4268676SRabin Vincent 
496b4268676SRabin Vincent static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
497b4268676SRabin Vincent 				  struct page **ret_page)
498b4268676SRabin Vincent {
499b4268676SRabin Vincent 	return __alloc_from_pool(args->size, ret_page);
500b4268676SRabin Vincent }
501b4268676SRabin Vincent 
502b4268676SRabin Vincent static void pool_allocator_free(struct arm_dma_free_args *args)
503b4268676SRabin Vincent {
504b4268676SRabin Vincent 	__free_from_pool(args->cpu_addr, args->size);
505b4268676SRabin Vincent }
506b4268676SRabin Vincent 
507b4268676SRabin Vincent static struct arm_dma_allocator pool_allocator = {
508b4268676SRabin Vincent 	.alloc = pool_allocator_alloc,
509b4268676SRabin Vincent 	.free = pool_allocator_free,
510b4268676SRabin Vincent };
511b4268676SRabin Vincent 
512b4268676SRabin Vincent static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
513b4268676SRabin Vincent 				   struct page **ret_page)
514b4268676SRabin Vincent {
515b4268676SRabin Vincent 	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
516b4268676SRabin Vincent 				    args->prot, ret_page, args->caller,
517b4268676SRabin Vincent 				    args->want_vaddr);
518b4268676SRabin Vincent }
519b4268676SRabin Vincent 
520b4268676SRabin Vincent static void remap_allocator_free(struct arm_dma_free_args *args)
521b4268676SRabin Vincent {
522b4268676SRabin Vincent 	if (args->want_vaddr)
52378406ff5SChristoph Hellwig 		dma_common_free_remap(args->cpu_addr, args->size);
524b4268676SRabin Vincent 
525b4268676SRabin Vincent 	__dma_free_buffer(args->page, args->size);
526b4268676SRabin Vincent }
527b4268676SRabin Vincent 
528b4268676SRabin Vincent static struct arm_dma_allocator remap_allocator = {
529b4268676SRabin Vincent 	.alloc = remap_allocator_alloc,
530b4268676SRabin Vincent 	.free = remap_allocator_free,
531b4268676SRabin Vincent };
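/*
 * How __dma_alloc() below chooses between these four allocators, in
 * order:
 *
 *	1. blocking allowed and the device has a CMA area -> cma_allocator
 *	2. otherwise, coherent device                      -> simple_allocator
 *	3. otherwise, blocking allowed                     -> remap_allocator
 *	4. otherwise (atomic context)                      -> pool_allocator
 */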
532c7909509SMarek Szyprowski 
533c7909509SMarek Szyprowski static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
5346e8266e3SCarlo Caione 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
53500085f1eSKrzysztof Kozlowski 			 unsigned long attrs, const void *caller)
536c7909509SMarek Szyprowski {
5377607cb73SChristoph Hellwig 	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
5383dd7ea92SJingoo Han 	struct page *page = NULL;
53931ebf944SRussell King 	void *addr;
540b4268676SRabin Vincent 	bool allowblock, cma;
54119e6e5e5SRabin Vincent 	struct arm_dma_buffer *buf;
542b4268676SRabin Vincent 	struct arm_dma_alloc_args args = {
543b4268676SRabin Vincent 		.dev = dev,
544b4268676SRabin Vincent 		.size = PAGE_ALIGN(size),
545b4268676SRabin Vincent 		.gfp = gfp,
546b4268676SRabin Vincent 		.prot = prot,
547b4268676SRabin Vincent 		.caller = caller,
54800085f1eSKrzysztof Kozlowski 		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
549f1270896SGregory CLEMENT 		.coherent_flag = is_coherent ? COHERENT : NORMAL,
550b4268676SRabin Vincent 	};
551ab6494f0SCatalin Marinas 
552c7909509SMarek Szyprowski #ifdef CONFIG_DMA_API_DEBUG
553c7909509SMarek Szyprowski 	u64 limit = (mask + 1) & ~mask;
554c7909509SMarek Szyprowski 	if (limit && size >= limit) {
555c7909509SMarek Szyprowski 		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
556c7909509SMarek Szyprowski 			size, mask);
557c7909509SMarek Szyprowski 		return NULL;
558c7909509SMarek Szyprowski 	}
559c7909509SMarek Szyprowski #endif
560c7909509SMarek Szyprowski 
5619c18fcf7SAlexandre Courbot 	buf = kzalloc(sizeof(*buf),
5629c18fcf7SAlexandre Courbot 		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
56319e6e5e5SRabin Vincent 	if (!buf)
56419e6e5e5SRabin Vincent 		return NULL;
56519e6e5e5SRabin Vincent 
566c7909509SMarek Szyprowski 	if (mask < 0xffffffffULL)
567c7909509SMarek Szyprowski 		gfp |= GFP_DMA;
568c7909509SMarek Szyprowski 
569b4268676SRabin Vincent 	args.gfp = gfp;
570ea2e7057SSumit Bhattacharya 
57172fd97bfSChristoph Hellwig 	*handle = DMA_MAPPING_ERROR;
572b4268676SRabin Vincent 	allowblock = gfpflags_allow_blocking(gfp);
57334370214SBen Dooks 	cma = allowblock ? dev_get_cma_area(dev) : NULL;
57404da5694SRussell King 
575b4268676SRabin Vincent 	if (cma)
576b4268676SRabin Vincent 		buf->allocator = &cma_allocator;
5771655cf88SVladimir Murzin 	else if (is_coherent)
578b4268676SRabin Vincent 		buf->allocator = &simple_allocator;
579b4268676SRabin Vincent 	else if (allowblock)
580b4268676SRabin Vincent 		buf->allocator = &remap_allocator;
58131ebf944SRussell King 	else
582b4268676SRabin Vincent 		buf->allocator = &pool_allocator;
583b4268676SRabin Vincent 
584b4268676SRabin Vincent 	addr = buf->allocator->alloc(&args, &page);
58531ebf944SRussell King 
58619e6e5e5SRabin Vincent 	if (page) {
58719e6e5e5SRabin Vincent 		unsigned long flags;
58819e6e5e5SRabin Vincent 
589f9774cfdSChristoph Hellwig 		*handle = phys_to_dma(dev, page_to_phys(page));
590b4268676SRabin Vincent 		buf->virt = args.want_vaddr ? addr : page;
59119e6e5e5SRabin Vincent 
59219e6e5e5SRabin Vincent 		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
59319e6e5e5SRabin Vincent 		list_add(&buf->list, &arm_dma_bufs);
59419e6e5e5SRabin Vincent 		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
59519e6e5e5SRabin Vincent 	} else {
59619e6e5e5SRabin Vincent 		kfree(buf);
59719e6e5e5SRabin Vincent 	}
59831ebf944SRussell King 
599b4268676SRabin Vincent 	return args.want_vaddr ? addr : page;
600ab6494f0SCatalin Marinas }
601695ae0afSRussell King 
6020ddbccd1SRussell King /*
603c7909509SMarek Szyprowski  * Free a buffer as defined by the above mapping.
6040ddbccd1SRussell King  */
605dd37e940SRob Herring static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
60600085f1eSKrzysztof Kozlowski 			   dma_addr_t handle, unsigned long attrs,
607dd37e940SRob Herring 			   bool is_coherent)
6080ddbccd1SRussell King {
609f9774cfdSChristoph Hellwig 	struct page *page = phys_to_page(dma_to_phys(dev, handle));
61019e6e5e5SRabin Vincent 	struct arm_dma_buffer *buf;
611b4268676SRabin Vincent 	struct arm_dma_free_args args = {
612b4268676SRabin Vincent 		.dev = dev,
613b4268676SRabin Vincent 		.size = PAGE_ALIGN(size),
614b4268676SRabin Vincent 		.cpu_addr = cpu_addr,
615b4268676SRabin Vincent 		.page = page,
61600085f1eSKrzysztof Kozlowski 		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
617b4268676SRabin Vincent 	};
61819e6e5e5SRabin Vincent 
61919e6e5e5SRabin Vincent 	buf = arm_dma_buffer_find(cpu_addr);
62019e6e5e5SRabin Vincent 	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
62119e6e5e5SRabin Vincent 		return;
6220ddbccd1SRussell King 
623b4268676SRabin Vincent 	buf->allocator->free(&args);
62419e6e5e5SRabin Vincent 	kfree(buf);
6250ddbccd1SRussell King }
626afd1a321SRussell King 
62765af191aSRussell King static void dma_cache_maint_page(struct page *page, unsigned long offset,
628a9c9147eSRussell King 	size_t size, enum dma_data_direction dir,
629a9c9147eSRussell King 	void (*op)(const void *, size_t, int))
63065af191aSRussell King {
63115653371SRussell King 	unsigned long pfn;
63215653371SRussell King 	size_t left = size;
63315653371SRussell King 
63415653371SRussell King 	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
63515653371SRussell King 	offset %= PAGE_SIZE;
63615653371SRussell King 
63765af191aSRussell King 	/*
63865af191aSRussell King 	 * A single sg entry may refer to multiple physically contiguous
63965af191aSRussell King 	 * pages.  But we still need to process highmem pages individually.
64065af191aSRussell King 	 * If highmem is not configured then the bulk of this loop gets
64165af191aSRussell King 	 * optimized out.
64265af191aSRussell King 	 */
64365af191aSRussell King 	do {
64465af191aSRussell King 		size_t len = left;
64593f1d629SRussell King 		void *vaddr;
64693f1d629SRussell King 
64715653371SRussell King 		page = pfn_to_page(pfn);
64815653371SRussell King 
64993f1d629SRussell King 		if (PageHighMem(page)) {
65015653371SRussell King 			if (len + offset > PAGE_SIZE)
65165af191aSRussell King 				len = PAGE_SIZE - offset;
652dd0f67f4SJoonsoo Kim 
653dd0f67f4SJoonsoo Kim 			if (cache_is_vipt_nonaliasing()) {
65439af22a7SNicolas Pitre 				vaddr = kmap_atomic(page);
6557e5a69e8SNicolas Pitre 				op(vaddr + offset, len, dir);
65639af22a7SNicolas Pitre 				kunmap_atomic(vaddr);
657dd0f67f4SJoonsoo Kim 			} else {
658dd0f67f4SJoonsoo Kim 				vaddr = kmap_high_get(page);
659dd0f67f4SJoonsoo Kim 				if (vaddr) {
660dd0f67f4SJoonsoo Kim 					op(vaddr + offset, len, dir);
661dd0f67f4SJoonsoo Kim 					kunmap_high(page);
662dd0f67f4SJoonsoo Kim 				}
66393f1d629SRussell King 			}
66493f1d629SRussell King 		} else {
66593f1d629SRussell King 			vaddr = page_address(page) + offset;
666a9c9147eSRussell King 			op(vaddr, len, dir);
66793f1d629SRussell King 		}
66865af191aSRussell King 		offset = 0;
66915653371SRussell King 		pfn++;
67065af191aSRussell King 		left -= len;
67165af191aSRussell King 	} while (left);
67265af191aSRussell King }
67365af191aSRussell King 
67451fde349SMarek Szyprowski /*
67551fde349SMarek Szyprowski  * Make an area consistent for devices.
676a45e52bfSLukas Bulwahn  * Note: Drivers should NOT use this function directly.
67751fde349SMarek Szyprowski  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
67851fde349SMarek Szyprowski  */
67951fde349SMarek Szyprowski static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
68065af191aSRussell King 	size_t size, enum dma_data_direction dir)
68165af191aSRussell King {
6822161c248SSantosh Shilimkar 	phys_addr_t paddr;
68343377453SNicolas Pitre 
684a9c9147eSRussell King 	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
68543377453SNicolas Pitre 
68665af191aSRussell King 	paddr = page_to_phys(page) + off;
6872ffe2da3SRussell King 	if (dir == DMA_FROM_DEVICE) {
6882ffe2da3SRussell King 		outer_inv_range(paddr, paddr + size);
6892ffe2da3SRussell King 	} else {
6902ffe2da3SRussell King 		outer_clean_range(paddr, paddr + size);
6912ffe2da3SRussell King 	}
6922ffe2da3SRussell King 	/* FIXME: non-speculating: flush on bidirectional mappings? */
69343377453SNicolas Pitre }
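/*
 * Outer-cache effect of __dma_page_cpu_to_dev(), by direction:
 * DMA_FROM_DEVICE invalidates the range (the device will overwrite it),
 * every other direction cleans it so the device sees the CPU's data.
 * The inner cache is handled by dmac_map_area() via dma_cache_maint_page().
 */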
6944ea0d737SRussell King 
69551fde349SMarek Szyprowski static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
6964ea0d737SRussell King 	size_t size, enum dma_data_direction dir)
6974ea0d737SRussell King {
6982161c248SSantosh Shilimkar 	phys_addr_t paddr = page_to_phys(page) + off;
6992ffe2da3SRussell King 
7002ffe2da3SRussell King 	/* FIXME: non-speculating: not required */
701deace4a6SRussell King 	/* in any case, don't bother invalidating if DMA to device */
702deace4a6SRussell King 	if (dir != DMA_TO_DEVICE) {
7032ffe2da3SRussell King 		outer_inv_range(paddr, paddr + size);
7042ffe2da3SRussell King 
705a9c9147eSRussell King 		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
706deace4a6SRussell King 	}
707c0177800SCatalin Marinas 
708c0177800SCatalin Marinas 	/*
709b2a234edSMing Lei 	 * Mark the D-cache clean for these pages to avoid extra flushing.
710c0177800SCatalin Marinas 	 */
711b2a234edSMing Lei 	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
712*8b5989f3SMatthew Wilcox (Oracle) 		struct folio *folio = pfn_folio(paddr / PAGE_SIZE);
713*8b5989f3SMatthew Wilcox (Oracle) 		size_t offset = offset_in_folio(folio, paddr);
714b2a234edSMing Lei 
715*8b5989f3SMatthew Wilcox (Oracle) 		for (;;) {
716*8b5989f3SMatthew Wilcox (Oracle) 			size_t sz = folio_size(folio) - offset;
717*8b5989f3SMatthew Wilcox (Oracle) 
718*8b5989f3SMatthew Wilcox (Oracle) 			if (size < sz)
719*8b5989f3SMatthew Wilcox (Oracle) 				break;
720*8b5989f3SMatthew Wilcox (Oracle) 			if (!offset)
721*8b5989f3SMatthew Wilcox (Oracle) 				set_bit(PG_dcache_clean, &folio->flags);
722*8b5989f3SMatthew Wilcox (Oracle) 			offset = 0;
723*8b5989f3SMatthew Wilcox (Oracle) 			size -= sz;
724*8b5989f3SMatthew Wilcox (Oracle) 			if (!size)
725*8b5989f3SMatthew Wilcox (Oracle) 				break;
726*8b5989f3SMatthew Wilcox (Oracle) 			folio = folio_next(folio);
727b2a234edSMing Lei 		}
728b2a234edSMing Lei 	}
7294ea0d737SRussell King }
73043377453SNicolas Pitre 
7314ce63fcdSMarek Szyprowski #ifdef CONFIG_ARM_DMA_USE_IOMMU
7324ce63fcdSMarek Szyprowski 
7337d2822dfSSricharan R static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
7347d2822dfSSricharan R {
7357d2822dfSSricharan R 	int prot = 0;
7367d2822dfSSricharan R 
7377d2822dfSSricharan R 	if (attrs & DMA_ATTR_PRIVILEGED)
7387d2822dfSSricharan R 		prot |= IOMMU_PRIV;
7397d2822dfSSricharan R 
7407d2822dfSSricharan R 	switch (dir) {
7417d2822dfSSricharan R 	case DMA_BIDIRECTIONAL:
7427d2822dfSSricharan R 		return prot | IOMMU_READ | IOMMU_WRITE;
7437d2822dfSSricharan R 	case DMA_TO_DEVICE:
7447d2822dfSSricharan R 		return prot | IOMMU_READ;
7457d2822dfSSricharan R 	case DMA_FROM_DEVICE:
7467d2822dfSSricharan R 		return prot | IOMMU_WRITE;
7477d2822dfSSricharan R 	default:
7487d2822dfSSricharan R 		return prot;
7497d2822dfSSricharan R 	}
7507d2822dfSSricharan R }
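/*
 * Example: a DMA_BIDIRECTIONAL mapping requested with DMA_ATTR_PRIVILEGED
 * yields IOMMU_PRIV | IOMMU_READ | IOMMU_WRITE from the helper above,
 * while a plain DMA_TO_DEVICE mapping yields just IOMMU_READ.
 */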
7517d2822dfSSricharan R 
7524ce63fcdSMarek Szyprowski /* IOMMU */
7534ce63fcdSMarek Szyprowski 
7544d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
7554d852ef8SAndreas Herrmann 
7564ce63fcdSMarek Szyprowski static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
7574ce63fcdSMarek Szyprowski 				      size_t size)
7584ce63fcdSMarek Szyprowski {
7594ce63fcdSMarek Szyprowski 	unsigned int order = get_order(size);
7604ce63fcdSMarek Szyprowski 	unsigned int align = 0;
7614ce63fcdSMarek Szyprowski 	unsigned int count, start;
762006f841dSRitesh Harjani 	size_t mapping_size = mapping->bits << PAGE_SHIFT;
7634ce63fcdSMarek Szyprowski 	unsigned long flags;
7644d852ef8SAndreas Herrmann 	dma_addr_t iova;
7654d852ef8SAndreas Herrmann 	int i;
7664ce63fcdSMarek Szyprowski 
76760460abfSSeung-Woo Kim 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
76860460abfSSeung-Woo Kim 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
76960460abfSSeung-Woo Kim 
77068efd7d2SMarek Szyprowski 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
77168efd7d2SMarek Szyprowski 	align = (1 << order) - 1;
7724ce63fcdSMarek Szyprowski 
7734ce63fcdSMarek Szyprowski 	spin_lock_irqsave(&mapping->lock, flags);
7744d852ef8SAndreas Herrmann 	for (i = 0; i < mapping->nr_bitmaps; i++) {
7754d852ef8SAndreas Herrmann 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
7764d852ef8SAndreas Herrmann 				mapping->bits, 0, count, align);
7774d852ef8SAndreas Herrmann 
7784d852ef8SAndreas Herrmann 		if (start > mapping->bits)
7794d852ef8SAndreas Herrmann 			continue;
7804d852ef8SAndreas Herrmann 
7814d852ef8SAndreas Herrmann 		bitmap_set(mapping->bitmaps[i], start, count);
7824d852ef8SAndreas Herrmann 		break;
7834d852ef8SAndreas Herrmann 	}
7844d852ef8SAndreas Herrmann 
7854d852ef8SAndreas Herrmann 	/*
7864d852ef8SAndreas Herrmann 	 * No unused range found. Try to extend the existing mapping
7874d852ef8SAndreas Herrmann 	 * and perform a second attempt to reserve an IO virtual
7884d852ef8SAndreas Herrmann 	 * address range of size bytes.
7894d852ef8SAndreas Herrmann 	 */
7904d852ef8SAndreas Herrmann 	if (i == mapping->nr_bitmaps) {
7914d852ef8SAndreas Herrmann 		if (extend_iommu_mapping(mapping)) {
7924d852ef8SAndreas Herrmann 			spin_unlock_irqrestore(&mapping->lock, flags);
79372fd97bfSChristoph Hellwig 			return DMA_MAPPING_ERROR;
7944d852ef8SAndreas Herrmann 		}
7954d852ef8SAndreas Herrmann 
7964d852ef8SAndreas Herrmann 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
7974d852ef8SAndreas Herrmann 				mapping->bits, 0, count, align);
7984d852ef8SAndreas Herrmann 
7994ce63fcdSMarek Szyprowski 		if (start > mapping->bits) {
8004ce63fcdSMarek Szyprowski 			spin_unlock_irqrestore(&mapping->lock, flags);
80172fd97bfSChristoph Hellwig 			return DMA_MAPPING_ERROR;
8024ce63fcdSMarek Szyprowski 		}
8034ce63fcdSMarek Szyprowski 
8044d852ef8SAndreas Herrmann 		bitmap_set(mapping->bitmaps[i], start, count);
8054d852ef8SAndreas Herrmann 	}
8064ce63fcdSMarek Szyprowski 	spin_unlock_irqrestore(&mapping->lock, flags);
8074ce63fcdSMarek Szyprowski 
808006f841dSRitesh Harjani 	iova = mapping->base + (mapping_size * i);
80968efd7d2SMarek Szyprowski 	iova += start << PAGE_SHIFT;
8104d852ef8SAndreas Herrmann 
8114d852ef8SAndreas Herrmann 	return iova;
8124ce63fcdSMarek Szyprowski }
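/*
 * Worked example (assuming 4 KiB pages; the size is made up): a 68 KiB
 * request gives get_order() == 5, so align becomes a 31-page mask and
 * count is 17 pages; the bitmap search above therefore looks for 17
 * free page slots starting on a 32-page boundary.  The clamp against
 * CONFIG_ARM_DMA_IOMMU_ALIGNMENT keeps very large buffers from forcing
 * correspondingly large alignments.
 */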
8134ce63fcdSMarek Szyprowski 
8144ce63fcdSMarek Szyprowski static inline void __free_iova(struct dma_iommu_mapping *mapping,
8154ce63fcdSMarek Szyprowski 			       dma_addr_t addr, size_t size)
8164ce63fcdSMarek Szyprowski {
8174d852ef8SAndreas Herrmann 	unsigned int start, count;
818006f841dSRitesh Harjani 	size_t mapping_size = mapping->bits << PAGE_SHIFT;
8194ce63fcdSMarek Szyprowski 	unsigned long flags;
8204d852ef8SAndreas Herrmann 	dma_addr_t bitmap_base;
8214d852ef8SAndreas Herrmann 	u32 bitmap_index;
8224d852ef8SAndreas Herrmann 
8234d852ef8SAndreas Herrmann 	if (!size)
8244d852ef8SAndreas Herrmann 		return;
8254d852ef8SAndreas Herrmann 
826006f841dSRitesh Harjani 	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
8274d852ef8SAndreas Herrmann 	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
8284d852ef8SAndreas Herrmann 
829006f841dSRitesh Harjani 	bitmap_base = mapping->base + mapping_size * bitmap_index;
8304d852ef8SAndreas Herrmann 
83168efd7d2SMarek Szyprowski 	start = (addr - bitmap_base) >>	PAGE_SHIFT;
8324d852ef8SAndreas Herrmann 
833006f841dSRitesh Harjani 	if (addr + size > bitmap_base + mapping_size) {
8344d852ef8SAndreas Herrmann 		/*
8354d852ef8SAndreas Herrmann 		 * The address range to be freed reaches into the iova
8364d852ef8SAndreas Herrmann 		 * range of the next bitmap. This should not happen as
8374d852ef8SAndreas Herrmann 		 * we don't allow this in __alloc_iova (at the
8384d852ef8SAndreas Herrmann 		 * moment).
8394d852ef8SAndreas Herrmann 		 */
8404d852ef8SAndreas Herrmann 		BUG();
8414d852ef8SAndreas Herrmann 	} else
84268efd7d2SMarek Szyprowski 		count = size >> PAGE_SHIFT;
8434ce63fcdSMarek Szyprowski 
8444ce63fcdSMarek Szyprowski 	spin_lock_irqsave(&mapping->lock, flags);
8454d852ef8SAndreas Herrmann 	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
8464ce63fcdSMarek Szyprowski 	spin_unlock_irqrestore(&mapping->lock, flags);
8474ce63fcdSMarek Szyprowski }
8484ce63fcdSMarek Szyprowski 
84933298ef6SDoug Anderson /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
85033298ef6SDoug Anderson static const int iommu_order_array[] = { 9, 8, 4, 0 };
85133298ef6SDoug Anderson 
852549a17e4SMarek Szyprowski static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
85300085f1eSKrzysztof Kozlowski 					  gfp_t gfp, unsigned long attrs,
854f1270896SGregory CLEMENT 					  int coherent_flag)
8554ce63fcdSMarek Szyprowski {
8564ce63fcdSMarek Szyprowski 	struct page **pages;
8574ce63fcdSMarek Szyprowski 	int count = size >> PAGE_SHIFT;
8584ce63fcdSMarek Szyprowski 	int array_size = count * sizeof(struct page *);
8594ce63fcdSMarek Szyprowski 	int i = 0;
86033298ef6SDoug Anderson 	int order_idx = 0;
8614ce63fcdSMarek Szyprowski 
8624ce63fcdSMarek Szyprowski 	if (array_size <= PAGE_SIZE)
86323be7fdaSAlexandre Courbot 		pages = kzalloc(array_size, GFP_KERNEL);
8644ce63fcdSMarek Szyprowski 	else
8654ce63fcdSMarek Szyprowski 		pages = vzalloc(array_size);
8664ce63fcdSMarek Szyprowski 	if (!pages)
8674ce63fcdSMarek Szyprowski 		return NULL;
8684ce63fcdSMarek Szyprowski 
86900085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
870549a17e4SMarek Szyprowski 	{
871549a17e4SMarek Szyprowski 		unsigned long order = get_order(size);
872549a17e4SMarek Szyprowski 		struct page *page;
873549a17e4SMarek Szyprowski 
874d834c5abSMarek Szyprowski 		page = dma_alloc_from_contiguous(dev, count, order,
875d834c5abSMarek Szyprowski 						 gfp & __GFP_NOWARN);
876549a17e4SMarek Szyprowski 		if (!page)
877549a17e4SMarek Szyprowski 			goto error;
878549a17e4SMarek Szyprowski 
879f1270896SGregory CLEMENT 		__dma_clear_buffer(page, size, coherent_flag);
880549a17e4SMarek Szyprowski 
881549a17e4SMarek Szyprowski 		for (i = 0; i < count; i++)
882549a17e4SMarek Szyprowski 			pages[i] = page + i;
883549a17e4SMarek Szyprowski 
884549a17e4SMarek Szyprowski 		return pages;
885549a17e4SMarek Szyprowski 	}
886549a17e4SMarek Szyprowski 
88714d3ae2eSDoug Anderson 	/* Go straight to 4K chunks if caller says it's OK. */
88800085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
88914d3ae2eSDoug Anderson 		order_idx = ARRAY_SIZE(iommu_order_array) - 1;
89014d3ae2eSDoug Anderson 
891f8669befSMarek Szyprowski 	/*
892f8669befSMarek Szyprowski 	 * IOMMU can map any pages, so highmem can also be used here
893f8669befSMarek Szyprowski 	 */
894f8669befSMarek Szyprowski 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
895f8669befSMarek Szyprowski 
8964ce63fcdSMarek Szyprowski 	while (count) {
89749f28aa6STomasz Figa 		int j, order;
8984ce63fcdSMarek Szyprowski 
89933298ef6SDoug Anderson 		order = iommu_order_array[order_idx];
90033298ef6SDoug Anderson 
90133298ef6SDoug Anderson 		/* Drop down when we get small */
90233298ef6SDoug Anderson 		if (__fls(count) < order) {
90333298ef6SDoug Anderson 			order_idx++;
90433298ef6SDoug Anderson 			continue;
90549f28aa6STomasz Figa 		}
90649f28aa6STomasz Figa 
90733298ef6SDoug Anderson 		if (order) {
90833298ef6SDoug Anderson 			/* See if it's easy to allocate a high-order chunk */
90933298ef6SDoug Anderson 			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
91033298ef6SDoug Anderson 
91133298ef6SDoug Anderson 			/* Go down a notch at first sign of pressure */
91249f28aa6STomasz Figa 			if (!pages[i]) {
91333298ef6SDoug Anderson 				order_idx++;
91433298ef6SDoug Anderson 				continue;
91533298ef6SDoug Anderson 			}
91633298ef6SDoug Anderson 		} else {
91749f28aa6STomasz Figa 			pages[i] = alloc_pages(gfp, 0);
9184ce63fcdSMarek Szyprowski 			if (!pages[i])
9194ce63fcdSMarek Szyprowski 				goto error;
92049f28aa6STomasz Figa 		}
9214ce63fcdSMarek Szyprowski 
9225a796eebSHiroshi Doyu 		if (order) {
9234ce63fcdSMarek Szyprowski 			split_page(pages[i], order);
9244ce63fcdSMarek Szyprowski 			j = 1 << order;
9254ce63fcdSMarek Szyprowski 			while (--j)
9264ce63fcdSMarek Szyprowski 				pages[i + j] = pages[i] + j;
9275a796eebSHiroshi Doyu 		}
9284ce63fcdSMarek Szyprowski 
929f1270896SGregory CLEMENT 		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
9304ce63fcdSMarek Szyprowski 		i += 1 << order;
9314ce63fcdSMarek Szyprowski 		count -= 1 << order;
9324ce63fcdSMarek Szyprowski 	}
9334ce63fcdSMarek Szyprowski 
9344ce63fcdSMarek Szyprowski 	return pages;
9354ce63fcdSMarek Szyprowski error:
9369fa8af91SMarek Szyprowski 	while (i--)
9374ce63fcdSMarek Szyprowski 		if (pages[i])
9384ce63fcdSMarek Szyprowski 			__free_pages(pages[i], 0);
9391d5cfdb0STetsuo Handa 	kvfree(pages);
9404ce63fcdSMarek Szyprowski 	return NULL;
9414ce63fcdSMarek Szyprowski }
9424ce63fcdSMarek Szyprowski 
943549a17e4SMarek Szyprowski static int __iommu_free_buffer(struct device *dev, struct page **pages,
94400085f1eSKrzysztof Kozlowski 			       size_t size, unsigned long attrs)
9454ce63fcdSMarek Szyprowski {
9464ce63fcdSMarek Szyprowski 	int count = size >> PAGE_SHIFT;
9474ce63fcdSMarek Szyprowski 	int i;
948549a17e4SMarek Szyprowski 
94900085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
950549a17e4SMarek Szyprowski 		dma_release_from_contiguous(dev, pages[0], count);
951549a17e4SMarek Szyprowski 	} else {
9524ce63fcdSMarek Szyprowski 		for (i = 0; i < count; i++)
9534ce63fcdSMarek Szyprowski 			if (pages[i])
9544ce63fcdSMarek Szyprowski 				__free_pages(pages[i], 0);
955549a17e4SMarek Szyprowski 	}
956549a17e4SMarek Szyprowski 
9571d5cfdb0STetsuo Handa 	kvfree(pages);
9584ce63fcdSMarek Szyprowski 	return 0;
9594ce63fcdSMarek Szyprowski }
9604ce63fcdSMarek Szyprowski 
9614ce63fcdSMarek Szyprowski /*
9624ce63fcdSMarek Szyprowski  * Create a mapping in device IO address space for specified pages
9634ce63fcdSMarek Szyprowski  */
9644ce63fcdSMarek Szyprowski static dma_addr_t
9657d2822dfSSricharan R __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
9667d2822dfSSricharan R 		       unsigned long attrs)
9674ce63fcdSMarek Szyprowski {
96889cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
9694ce63fcdSMarek Szyprowski 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
9704ce63fcdSMarek Szyprowski 	dma_addr_t dma_addr, iova;
97190cde558SAndre Przywara 	int i;
9724ce63fcdSMarek Szyprowski 
9734ce63fcdSMarek Szyprowski 	dma_addr = __alloc_iova(mapping, size);
97472fd97bfSChristoph Hellwig 	if (dma_addr == DMA_MAPPING_ERROR)
9754ce63fcdSMarek Szyprowski 		return dma_addr;
9764ce63fcdSMarek Szyprowski 
9774ce63fcdSMarek Szyprowski 	iova = dma_addr;
9784ce63fcdSMarek Szyprowski 	for (i = 0; i < count; ) {
97990cde558SAndre Przywara 		int ret;
98090cde558SAndre Przywara 
9814ce63fcdSMarek Szyprowski 		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
9824ce63fcdSMarek Szyprowski 		phys_addr_t phys = page_to_phys(pages[i]);
9834ce63fcdSMarek Szyprowski 		unsigned int len, j;
9844ce63fcdSMarek Szyprowski 
9854ce63fcdSMarek Szyprowski 		for (j = i + 1; j < count; j++, next_pfn++)
9864ce63fcdSMarek Szyprowski 			if (page_to_pfn(pages[j]) != next_pfn)
9874ce63fcdSMarek Szyprowski 				break;
9884ce63fcdSMarek Szyprowski 
9894ce63fcdSMarek Szyprowski 		len = (j - i) << PAGE_SHIFT;
990c9b24996SAndreas Herrmann 		ret = iommu_map(mapping->domain, iova, phys, len,
9911369459bSJason Gunthorpe 				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
9921369459bSJason Gunthorpe 				GFP_KERNEL);
9934ce63fcdSMarek Szyprowski 		if (ret < 0)
9944ce63fcdSMarek Szyprowski 			goto fail;
9954ce63fcdSMarek Szyprowski 		iova += len;
9964ce63fcdSMarek Szyprowski 		i = j;
9974ce63fcdSMarek Szyprowski 	}
9984ce63fcdSMarek Szyprowski 	return dma_addr;
9994ce63fcdSMarek Szyprowski fail:
10004ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
10014ce63fcdSMarek Szyprowski 	__free_iova(mapping, dma_addr, size);
100272fd97bfSChristoph Hellwig 	return DMA_MAPPING_ERROR;
10034ce63fcdSMarek Szyprowski }
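/*
 * Example of the run merging above (page frame numbers are made up):
 * if pages[] holds pfns 100, 101, 102 and 200, the loop issues two
 * iommu_map() calls, one covering the three contiguous pages and one
 * for the single page, instead of four single-page mappings.
 */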
10044ce63fcdSMarek Szyprowski 
10054ce63fcdSMarek Szyprowski static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
10064ce63fcdSMarek Szyprowski {
100789cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
10084ce63fcdSMarek Szyprowski 
10094ce63fcdSMarek Szyprowski 	/*
10104ce63fcdSMarek Szyprowski 	 * add optional in-page offset from iova to size and align
10114ce63fcdSMarek Szyprowski 	 * result to page size
10124ce63fcdSMarek Szyprowski 	 */
10134ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
10144ce63fcdSMarek Szyprowski 	iova &= PAGE_MASK;
10154ce63fcdSMarek Szyprowski 
10164ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova, size);
10174ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova, size);
10184ce63fcdSMarek Szyprowski 	return 0;
10194ce63fcdSMarek Szyprowski }
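/*
 * Example of the rounding above (illustrative numbers): unmapping
 * 0x1000 bytes at iova 0x10000200 grows the size to
 * PAGE_ALIGN(0x200 + 0x1000) = 0x2000 and masks the iova down to
 * 0x10000000, so both pages touched by the original range are unmapped
 * and returned to the iova allocator.
 */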
10204ce63fcdSMarek Szyprowski 
1021665bad7bSHiroshi Doyu static struct page **__atomic_get_pages(void *addr)
1022665bad7bSHiroshi Doyu {
102336d0fd21SLaura Abbott 	struct page *page;
102436d0fd21SLaura Abbott 	phys_addr_t phys;
1025665bad7bSHiroshi Doyu 
102636d0fd21SLaura Abbott 	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
102736d0fd21SLaura Abbott 	page = phys_to_page(phys);
102836d0fd21SLaura Abbott 
102936d0fd21SLaura Abbott 	return (struct page **)page;
1030665bad7bSHiroshi Doyu }
1031665bad7bSHiroshi Doyu 
103200085f1eSKrzysztof Kozlowski static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1033e9da6e99SMarek Szyprowski {
1034665bad7bSHiroshi Doyu 	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1035665bad7bSHiroshi Doyu 		return __atomic_get_pages(cpu_addr);
1036665bad7bSHiroshi Doyu 
103700085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1038955c757eSMarek Szyprowski 		return cpu_addr;
1039955c757eSMarek Szyprowski 
10405cf45379SChristoph Hellwig 	return dma_common_find_pages(cpu_addr);
1041e9da6e99SMarek Szyprowski }
1042e9da6e99SMarek Szyprowski 
104356506822SGregory CLEMENT static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
10447d2822dfSSricharan R 				  dma_addr_t *handle, int coherent_flag,
10457d2822dfSSricharan R 				  unsigned long attrs)
1046479ed93aSHiroshi Doyu {
1047479ed93aSHiroshi Doyu 	struct page *page;
1048479ed93aSHiroshi Doyu 	void *addr;
1049479ed93aSHiroshi Doyu 
105056506822SGregory CLEMENT 	if (coherent_flag  == COHERENT)
105156506822SGregory CLEMENT 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
105256506822SGregory CLEMENT 	else
1053479ed93aSHiroshi Doyu 		addr = __alloc_from_pool(size, &page);
1054479ed93aSHiroshi Doyu 	if (!addr)
1055479ed93aSHiroshi Doyu 		return NULL;
1056479ed93aSHiroshi Doyu 
10577d2822dfSSricharan R 	*handle = __iommu_create_mapping(dev, &page, size, attrs);
105872fd97bfSChristoph Hellwig 	if (*handle == DMA_MAPPING_ERROR)
1059479ed93aSHiroshi Doyu 		goto err_mapping;
1060479ed93aSHiroshi Doyu 
1061479ed93aSHiroshi Doyu 	return addr;
1062479ed93aSHiroshi Doyu 
1063479ed93aSHiroshi Doyu err_mapping:
1064479ed93aSHiroshi Doyu 	__free_from_pool(addr, size);
1065479ed93aSHiroshi Doyu 	return NULL;
1066479ed93aSHiroshi Doyu }
1067479ed93aSHiroshi Doyu 
1068d5898291SMarek Szyprowski static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
106956506822SGregory CLEMENT 			dma_addr_t handle, size_t size, int coherent_flag)
1070479ed93aSHiroshi Doyu {
1071479ed93aSHiroshi Doyu 	__iommu_remove_mapping(dev, handle, size);
107256506822SGregory CLEMENT 	if (coherent_flag == COHERENT)
107356506822SGregory CLEMENT 		__dma_free_buffer(virt_to_page(cpu_addr), size);
107456506822SGregory CLEMENT 	else
1075d5898291SMarek Szyprowski 		__free_from_pool(cpu_addr, size);
1076479ed93aSHiroshi Doyu }
1077479ed93aSHiroshi Doyu 
1078d563bccfSRobin Murphy static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1079d563bccfSRobin Murphy 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
10804ce63fcdSMarek Szyprowski {
108171b55663SRussell King 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
10824ce63fcdSMarek Szyprowski 	struct page **pages;
10834ce63fcdSMarek Szyprowski 	void *addr = NULL;
1084d563bccfSRobin Murphy 	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
10854ce63fcdSMarek Szyprowski 
108672fd97bfSChristoph Hellwig 	*handle = DMA_MAPPING_ERROR;
10874ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
10884ce63fcdSMarek Szyprowski 
108956506822SGregory CLEMENT 	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
109056506822SGregory CLEMENT 		return __iommu_alloc_simple(dev, size, gfp, handle,
10917d2822dfSSricharan R 					    coherent_flag, attrs);
1092479ed93aSHiroshi Doyu 
109356506822SGregory CLEMENT 	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
10944ce63fcdSMarek Szyprowski 	if (!pages)
10954ce63fcdSMarek Szyprowski 		return NULL;
10964ce63fcdSMarek Szyprowski 
10977d2822dfSSricharan R 	*handle = __iommu_create_mapping(dev, pages, size, attrs);
109872fd97bfSChristoph Hellwig 	if (*handle == DMA_MAPPING_ERROR)
10994ce63fcdSMarek Szyprowski 		goto err_buffer;
11004ce63fcdSMarek Szyprowski 
110100085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1102955c757eSMarek Szyprowski 		return pages;
1103955c757eSMarek Szyprowski 
110478406ff5SChristoph Hellwig 	addr = dma_common_pages_remap(pages, size, prot,
1105e9da6e99SMarek Szyprowski 				   __builtin_return_address(0));
11064ce63fcdSMarek Szyprowski 	if (!addr)
11074ce63fcdSMarek Szyprowski 		goto err_mapping;
11084ce63fcdSMarek Szyprowski 
11094ce63fcdSMarek Szyprowski 	return addr;
11104ce63fcdSMarek Szyprowski 
11114ce63fcdSMarek Szyprowski err_mapping:
11124ce63fcdSMarek Szyprowski 	__iommu_remove_mapping(dev, *handle, size);
11134ce63fcdSMarek Szyprowski err_buffer:
1114549a17e4SMarek Szyprowski 	__iommu_free_buffer(dev, pages, size, attrs);
11154ce63fcdSMarek Szyprowski 	return NULL;
11164ce63fcdSMarek Szyprowski }
11174ce63fcdSMarek Szyprowski 
1118d563bccfSRobin Murphy static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
11194ce63fcdSMarek Szyprowski 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
112000085f1eSKrzysztof Kozlowski 		    unsigned long attrs)
11214ce63fcdSMarek Szyprowski {
1122955c757eSMarek Szyprowski 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1123371f0f08SMarek Szyprowski 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
11246248461dSSouptick Joarder 	int err;
1125e9da6e99SMarek Szyprowski 
1126e9da6e99SMarek Szyprowski 	if (!pages)
1127e9da6e99SMarek Szyprowski 		return -ENXIO;
11284ce63fcdSMarek Szyprowski 
11296248461dSSouptick Joarder 	if (vma->vm_pgoff >= nr_pages)
1130371f0f08SMarek Szyprowski 		return -ENXIO;
1131371f0f08SMarek Szyprowski 
1132d563bccfSRobin Murphy 	if (!dev->dma_coherent)
1133d563bccfSRobin Murphy 		vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1134d563bccfSRobin Murphy 
11356248461dSSouptick Joarder 	err = vm_map_pages(vma, pages, nr_pages);
11366248461dSSouptick Joarder 	if (err)
11376248461dSSouptick Joarder 		pr_err("Remapping memory failed: %d\n", err);
11387e312103SMarek Szyprowski 
11396248461dSSouptick Joarder 	return err;
11404ce63fcdSMarek Szyprowski }
11414ce63fcdSMarek Szyprowski 
11424ce63fcdSMarek Szyprowski /*
11434ce63fcdSMarek Szyprowski  * Free a buffer allocated by arm_iommu_alloc_attrs() and tear down its
11444ce63fcdSMarek Szyprowski  * IOMMU mapping. Must not be called with IRQs disabled.
11454ce63fcdSMarek Szyprowski  */
1146d563bccfSRobin Murphy static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1147d563bccfSRobin Murphy 	dma_addr_t handle, unsigned long attrs)
11484ce63fcdSMarek Szyprowski {
1149d563bccfSRobin Murphy 	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
1150836bfa0dSYoungJun Cho 	struct page **pages;
11514ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
11524ce63fcdSMarek Szyprowski 
115356506822SGregory CLEMENT 	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
115456506822SGregory CLEMENT 		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
1155479ed93aSHiroshi Doyu 		return;
1156479ed93aSHiroshi Doyu 	}
1157479ed93aSHiroshi Doyu 
1158836bfa0dSYoungJun Cho 	pages = __iommu_get_pages(cpu_addr, attrs);
1159836bfa0dSYoungJun Cho 	if (!pages) {
1160836bfa0dSYoungJun Cho 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1161836bfa0dSYoungJun Cho 		return;
1162836bfa0dSYoungJun Cho 	}
1163836bfa0dSYoungJun Cho 
1164fe9041c2SChristoph Hellwig 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
116551231740SChristoph Hellwig 		dma_common_free_remap(cpu_addr, size);
1166e9da6e99SMarek Szyprowski 
11674ce63fcdSMarek Szyprowski 	__iommu_remove_mapping(dev, handle, size);
1168549a17e4SMarek Szyprowski 	__iommu_free_buffer(dev, pages, size, attrs);
11694ce63fcdSMarek Szyprowski }
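/*
 * Illustrative sketch: once a device is attached to an IOMMU mapping,
 * coherent allocations reach arm_iommu_alloc_attrs()/arm_iommu_free_attrs()
 * through the regular DMA API ("dev" is assumed to be the attached device):
 *
 *      dma_addr_t dma;
 *      void *cpu = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *
 *      if (!cpu)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, SZ_64K, cpu, dma);
 *
 * The free path may tear down a kernel remapping and therefore, as noted
 * above, must not run with IRQs disabled.
 */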
11704ce63fcdSMarek Szyprowski 
1171dc2832e1SMarek Szyprowski static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1172dc2832e1SMarek Szyprowski 				 void *cpu_addr, dma_addr_t dma_addr,
117300085f1eSKrzysztof Kozlowski 				 size_t size, unsigned long attrs)
1174dc2832e1SMarek Szyprowski {
1175dc2832e1SMarek Szyprowski 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1176dc2832e1SMarek Szyprowski 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1177dc2832e1SMarek Szyprowski 
1178dc2832e1SMarek Szyprowski 	if (!pages)
1179dc2832e1SMarek Szyprowski 		return -ENXIO;
1180dc2832e1SMarek Szyprowski 
1181dc2832e1SMarek Szyprowski 	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1182dc2832e1SMarek Szyprowski 					 GFP_KERNEL);
11834ce63fcdSMarek Szyprowski }
11844ce63fcdSMarek Szyprowski 
11854ce63fcdSMarek Szyprowski /*
11864ce63fcdSMarek Szyprowski  * Map a part of the scatter-gather list into contiguous io address space
11874ce63fcdSMarek Szyprowski  */
11884ce63fcdSMarek Szyprowski static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
11894ce63fcdSMarek Szyprowski 			  size_t size, dma_addr_t *handle,
1190d563bccfSRobin Murphy 			  enum dma_data_direction dir, unsigned long attrs)
11914ce63fcdSMarek Szyprowski {
119289cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
11934ce63fcdSMarek Szyprowski 	dma_addr_t iova, iova_base;
11944ce63fcdSMarek Szyprowski 	int ret = 0;
11954ce63fcdSMarek Szyprowski 	unsigned int count;
11964ce63fcdSMarek Szyprowski 	struct scatterlist *s;
1197c9b24996SAndreas Herrmann 	int prot;
11984ce63fcdSMarek Szyprowski 
11994ce63fcdSMarek Szyprowski 	size = PAGE_ALIGN(size);
120072fd97bfSChristoph Hellwig 	*handle = DMA_MAPPING_ERROR;
12014ce63fcdSMarek Szyprowski 
12024ce63fcdSMarek Szyprowski 	iova_base = iova = __alloc_iova(mapping, size);
120372fd97bfSChristoph Hellwig 	if (iova == DMA_MAPPING_ERROR)
12044ce63fcdSMarek Szyprowski 		return -ENOMEM;
12054ce63fcdSMarek Szyprowski 
12064ce63fcdSMarek Szyprowski 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
12073e6110fdSDan Williams 		phys_addr_t phys = page_to_phys(sg_page(s));
12084ce63fcdSMarek Szyprowski 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
12094ce63fcdSMarek Szyprowski 
1210d563bccfSRobin Murphy 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
12114ce63fcdSMarek Szyprowski 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
12124ce63fcdSMarek Szyprowski 
12137d2822dfSSricharan R 		prot = __dma_info_to_prot(dir, attrs);
1214c9b24996SAndreas Herrmann 
12151369459bSJason Gunthorpe 		ret = iommu_map(mapping->domain, iova, phys, len, prot,
12161369459bSJason Gunthorpe 				GFP_KERNEL);
12174ce63fcdSMarek Szyprowski 		if (ret < 0)
12184ce63fcdSMarek Szyprowski 			goto fail;
12194ce63fcdSMarek Szyprowski 		count += len >> PAGE_SHIFT;
12204ce63fcdSMarek Szyprowski 		iova += len;
12214ce63fcdSMarek Szyprowski 	}
12224ce63fcdSMarek Szyprowski 	*handle = iova_base;
12234ce63fcdSMarek Szyprowski 
12244ce63fcdSMarek Szyprowski 	return 0;
12254ce63fcdSMarek Szyprowski fail:
12264ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
12274ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova_base, size);
12284ce63fcdSMarek Szyprowski 	return ret;
12294ce63fcdSMarek Szyprowski }
12304ce63fcdSMarek Szyprowski 
1231d563bccfSRobin Murphy /**
1232d563bccfSRobin Murphy  * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1233d563bccfSRobin Murphy  * @dev: valid struct device pointer
1234d563bccfSRobin Murphy  * @sg: list of buffers
1235d563bccfSRobin Murphy  * @nents: number of buffers to map
1236d563bccfSRobin Murphy  * @dir: DMA transfer direction
1237d563bccfSRobin Murphy  *
1238d563bccfSRobin Murphy  * Map a set of buffers described by scatterlist in streaming mode for DMA.
1239d563bccfSRobin Murphy  * The scatter gather list elements are merged together (if possible) and
1240d563bccfSRobin Murphy  * tagged with the appropriate dma address and length. They are obtained via
1241d563bccfSRobin Murphy  * sg_dma_{address,length}.
1242d563bccfSRobin Murphy  */
1243d563bccfSRobin Murphy static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1244d563bccfSRobin Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
12454ce63fcdSMarek Szyprowski {
12464ce63fcdSMarek Szyprowski 	struct scatterlist *s = sg, *dma = sg, *start = sg;
12476506932bSMartin Oliveira 	int i, count = 0, ret;
12484ce63fcdSMarek Szyprowski 	unsigned int offset = s->offset;
12494ce63fcdSMarek Szyprowski 	unsigned int size = s->offset + s->length;
12504ce63fcdSMarek Szyprowski 	unsigned int max = dma_get_max_seg_size(dev);
12514ce63fcdSMarek Szyprowski 
12524ce63fcdSMarek Szyprowski 	for (i = 1; i < nents; i++) {
12534ce63fcdSMarek Szyprowski 		s = sg_next(s);
12544ce63fcdSMarek Szyprowski 
12554ce63fcdSMarek Szyprowski 		s->dma_length = 0;
12564ce63fcdSMarek Szyprowski 
12574ce63fcdSMarek Szyprowski 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
12586506932bSMartin Oliveira 			ret = __map_sg_chunk(dev, start, size,
1259d563bccfSRobin Murphy 					     &dma->dma_address, dir, attrs);
12606506932bSMartin Oliveira 			if (ret < 0)
12614ce63fcdSMarek Szyprowski 				goto bad_mapping;
12624ce63fcdSMarek Szyprowski 
12634ce63fcdSMarek Szyprowski 			dma->dma_address += offset;
12644ce63fcdSMarek Szyprowski 			dma->dma_length = size - offset;
12654ce63fcdSMarek Szyprowski 
12664ce63fcdSMarek Szyprowski 			size = offset = s->offset;
12674ce63fcdSMarek Szyprowski 			start = s;
12684ce63fcdSMarek Szyprowski 			dma = sg_next(dma);
12694ce63fcdSMarek Szyprowski 			count += 1;
12704ce63fcdSMarek Szyprowski 		}
12714ce63fcdSMarek Szyprowski 		size += s->length;
12724ce63fcdSMarek Szyprowski 	}
1273d563bccfSRobin Murphy 	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
12746506932bSMartin Oliveira 	if (ret < 0)
12754ce63fcdSMarek Szyprowski 		goto bad_mapping;
12764ce63fcdSMarek Szyprowski 
12774ce63fcdSMarek Szyprowski 	dma->dma_address += offset;
12784ce63fcdSMarek Szyprowski 	dma->dma_length = size - offset;
12794ce63fcdSMarek Szyprowski 
12804ce63fcdSMarek Szyprowski 	return count+1;
12814ce63fcdSMarek Szyprowski 
12824ce63fcdSMarek Szyprowski bad_mapping:
12834ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, count, i)
12844ce63fcdSMarek Szyprowski 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
12856506932bSMartin Oliveira 	if (ret == -ENOMEM)
12866506932bSMartin Oliveira 		return ret;
12876506932bSMartin Oliveira 	return -EINVAL;
12884ce63fcdSMarek Szyprowski }
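/*
 * Illustrative sketch of a caller (not a specific driver): dma_map_sg()
 * dispatches here, and the returned count may be smaller than nents when
 * adjacent entries were merged into one IOVA segment. "dev", "sgl" and
 * "nents" are assumed to come from the driver; program_hw_segment() is a
 * placeholder for its own hardware programming:
 *
 *      int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *      struct scatterlist *s;
 *
 *      if (count == 0)
 *              return -ENOMEM;
 *      for_each_sg(sgl, s, count, i)
 *              program_hw_segment(sg_dma_address(s), sg_dma_len(s));
 *      ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the returned
 * count.
 */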
12894ce63fcdSMarek Szyprowski 
12904ce63fcdSMarek Szyprowski /**
12914ce63fcdSMarek Szyprowski  * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
12924ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
12934ce63fcdSMarek Szyprowski  * @sg: list of buffers
12944ce63fcdSMarek Szyprowski  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
12954ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
12964ce63fcdSMarek Szyprowski  *
12974ce63fcdSMarek Szyprowski  * Unmap a set of streaming mode DMA translations.  Again, CPU access
12984ce63fcdSMarek Szyprowski  * rules concerning calls here are the same as for dma_unmap_single().
12994ce63fcdSMarek Szyprowski  */
130017fe8684SBen Dooks static void arm_iommu_unmap_sg(struct device *dev,
130117fe8684SBen Dooks 			       struct scatterlist *sg, int nents,
130200085f1eSKrzysztof Kozlowski 			       enum dma_data_direction dir,
130300085f1eSKrzysztof Kozlowski 			       unsigned long attrs)
13044ce63fcdSMarek Szyprowski {
1305d563bccfSRobin Murphy 	struct scatterlist *s;
1306d563bccfSRobin Murphy 	int i;
1307d563bccfSRobin Murphy 
1308d563bccfSRobin Murphy 	for_each_sg(sg, s, nents, i) {
1309d563bccfSRobin Murphy 		if (sg_dma_len(s))
1310d563bccfSRobin Murphy 			__iommu_remove_mapping(dev, sg_dma_address(s),
1311d563bccfSRobin Murphy 					       sg_dma_len(s));
1312d563bccfSRobin Murphy 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1313d563bccfSRobin Murphy 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
1314d563bccfSRobin Murphy 					      s->length, dir);
1315d563bccfSRobin Murphy 	}
13164ce63fcdSMarek Szyprowski }
13174ce63fcdSMarek Szyprowski 
13184ce63fcdSMarek Szyprowski /**
13194ce63fcdSMarek Szyprowski  * arm_iommu_sync_sg_for_cpu
13204ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
13214ce63fcdSMarek Szyprowski  * @sg: list of buffers
13224ce63fcdSMarek Szyprowski  * @nents: number of buffers to sync (same as was passed to dma_map_sg)
13234ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
13244ce63fcdSMarek Szyprowski  */
132517fe8684SBen Dooks static void arm_iommu_sync_sg_for_cpu(struct device *dev,
132617fe8684SBen Dooks 			struct scatterlist *sg,
13274ce63fcdSMarek Szyprowski 			int nents, enum dma_data_direction dir)
13284ce63fcdSMarek Szyprowski {
13294ce63fcdSMarek Szyprowski 	struct scatterlist *s;
13304ce63fcdSMarek Szyprowski 	int i;
13314ce63fcdSMarek Szyprowski 
13324136ce90SRobin Murphy 	if (dev->dma_coherent)
13334136ce90SRobin Murphy 		return;
13344136ce90SRobin Murphy 
13354ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, nents, i)
13364ce63fcdSMarek Szyprowski 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
13374ce63fcdSMarek Szyprowski 
13384ce63fcdSMarek Szyprowski }
13394ce63fcdSMarek Szyprowski 
13404ce63fcdSMarek Szyprowski /**
13414ce63fcdSMarek Szyprowski  * arm_iommu_sync_sg_for_device
13424ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
13434ce63fcdSMarek Szyprowski  * @sg: list of buffers
13444ce63fcdSMarek Szyprowski  * @nents: number of buffers to sync (same as was passed to dma_map_sg)
13454ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
13464ce63fcdSMarek Szyprowski  */
134817fe8684SBen Dooks static void arm_iommu_sync_sg_for_device(struct device *dev,
134817fe8684SBen Dooks 			struct scatterlist *sg,
13494ce63fcdSMarek Szyprowski 			int nents, enum dma_data_direction dir)
13504ce63fcdSMarek Szyprowski {
13514ce63fcdSMarek Szyprowski 	struct scatterlist *s;
13524ce63fcdSMarek Szyprowski 	int i;
13534ce63fcdSMarek Szyprowski 
13544136ce90SRobin Murphy 	if (dev->dma_coherent)
13554136ce90SRobin Murphy 		return;
13564136ce90SRobin Murphy 
13574ce63fcdSMarek Szyprowski 	for_each_sg(sg, s, nents, i)
13584ce63fcdSMarek Szyprowski 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
13594ce63fcdSMarek Szyprowski }
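/*
 * Illustrative sketch: for a long-lived streaming mapping the CPU must
 * bracket its accesses with the sync calls, which land in the two helpers
 * above on non-coherent devices ("dev", "sgl" and "nents" as before):
 *
 *      dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
 *      ...CPU inspects the received data...
 *      dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * On dma-coherent devices both helpers return immediately, as the
 * dev->dma_coherent checks above show.
 */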
13604ce63fcdSMarek Szyprowski 
13614ce63fcdSMarek Szyprowski /**
1362d563bccfSRobin Murphy  * arm_iommu_map_page
13630fa478dfSRob Herring  * @dev: valid struct device pointer
13640fa478dfSRob Herring  * @page: page that buffer resides in
13650fa478dfSRob Herring  * @offset: offset into page for start of buffer
13660fa478dfSRob Herring  * @size: size of buffer to map
13670fa478dfSRob Herring  * @dir: DMA transfer direction
13680fa478dfSRob Herring  *
1369d563bccfSRobin Murphy  * IOMMU aware version of arm_dma_map_page()
13700fa478dfSRob Herring  */
1371d563bccfSRobin Murphy static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
13720fa478dfSRob Herring 	     unsigned long offset, size_t size, enum dma_data_direction dir,
137300085f1eSKrzysztof Kozlowski 	     unsigned long attrs)
13740fa478dfSRob Herring {
137589cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
13760fa478dfSRob Herring 	dma_addr_t dma_addr;
137713987d68SWill Deacon 	int ret, prot, len = PAGE_ALIGN(size + offset);
13780fa478dfSRob Herring 
1379d563bccfSRobin Murphy 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1380d563bccfSRobin Murphy 		__dma_page_cpu_to_dev(page, offset, size, dir);
1381d563bccfSRobin Murphy 
13820fa478dfSRob Herring 	dma_addr = __alloc_iova(mapping, len);
138372fd97bfSChristoph Hellwig 	if (dma_addr == DMA_MAPPING_ERROR)
13840fa478dfSRob Herring 		return dma_addr;
13850fa478dfSRob Herring 
13867d2822dfSSricharan R 	prot = __dma_info_to_prot(dir, attrs);
138713987d68SWill Deacon 
13881369459bSJason Gunthorpe 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
13891369459bSJason Gunthorpe 			prot, GFP_KERNEL);
13900fa478dfSRob Herring 	if (ret < 0)
13910fa478dfSRob Herring 		goto fail;
13920fa478dfSRob Herring 
13930fa478dfSRob Herring 	return dma_addr + offset;
13940fa478dfSRob Herring fail:
13950fa478dfSRob Herring 	__free_iova(mapping, dma_addr, len);
139672fd97bfSChristoph Hellwig 	return DMA_MAPPING_ERROR;
13970fa478dfSRob Herring }
13980fa478dfSRob Herring 
13990fa478dfSRob Herring /**
14004ce63fcdSMarek Szyprowski  * arm_iommu_unmap_page
14014ce63fcdSMarek Szyprowski  * @dev: valid struct device pointer
14024ce63fcdSMarek Szyprowski  * @handle: DMA address of buffer
14034ce63fcdSMarek Szyprowski  * @size: size of buffer (same as passed to dma_map_page)
14044ce63fcdSMarek Szyprowski  * @dir: DMA transfer direction (same as passed to dma_map_page)
14054ce63fcdSMarek Szyprowski  *
14064ce63fcdSMarek Szyprowski  * IOMMU aware version of arm_dma_unmap_page()
14074ce63fcdSMarek Szyprowski  */
14084ce63fcdSMarek Szyprowski static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
140900085f1eSKrzysztof Kozlowski 		size_t size, enum dma_data_direction dir, unsigned long attrs)
14104ce63fcdSMarek Szyprowski {
141189cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
14124ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
1413d563bccfSRobin Murphy 	struct page *page;
14144ce63fcdSMarek Szyprowski 	int offset = handle & ~PAGE_MASK;
14154ce63fcdSMarek Szyprowski 	int len = PAGE_ALIGN(size + offset);
14164ce63fcdSMarek Szyprowski 
14174ce63fcdSMarek Szyprowski 	if (!iova)
14184ce63fcdSMarek Szyprowski 		return;
14194ce63fcdSMarek Szyprowski 
1420d563bccfSRobin Murphy 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
1421d563bccfSRobin Murphy 		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
14224ce63fcdSMarek Szyprowski 		__dma_page_dev_to_cpu(page, offset, size, dir);
1423d563bccfSRobin Murphy 	}
14244ce63fcdSMarek Szyprowski 
14254ce63fcdSMarek Szyprowski 	iommu_unmap(mapping->domain, iova, len);
14264ce63fcdSMarek Szyprowski 	__free_iova(mapping, iova, len);
14274ce63fcdSMarek Szyprowski }
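/*
 * Illustrative sketch of the single-page streaming path that ends up in
 * arm_iommu_map_page()/arm_iommu_unmap_page(); "dev" and "page" are
 * assumed to be provided by the caller:
 *
 *      dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *                                    DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ...device reads from dma...
 *      dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */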
14284ce63fcdSMarek Szyprowski 
142924ed5d2cSNiklas Söderlund /**
143024ed5d2cSNiklas Söderlund  * arm_iommu_map_resource - map a device resource for DMA
143124ed5d2cSNiklas Söderlund  * @dev: valid struct device pointer
143224ed5d2cSNiklas Söderlund  * @phys_addr: physical address of resource
143324ed5d2cSNiklas Söderlund  * @size: size of resource to map
143424ed5d2cSNiklas Söderlund  * @dir: DMA transfer direction
143524ed5d2cSNiklas Söderlund  */
143624ed5d2cSNiklas Söderlund static dma_addr_t arm_iommu_map_resource(struct device *dev,
143724ed5d2cSNiklas Söderlund 		phys_addr_t phys_addr, size_t size,
143824ed5d2cSNiklas Söderlund 		enum dma_data_direction dir, unsigned long attrs)
143924ed5d2cSNiklas Söderlund {
144024ed5d2cSNiklas Söderlund 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
144124ed5d2cSNiklas Söderlund 	dma_addr_t dma_addr;
144224ed5d2cSNiklas Söderlund 	int ret, prot;
144324ed5d2cSNiklas Söderlund 	phys_addr_t addr = phys_addr & PAGE_MASK;
144424ed5d2cSNiklas Söderlund 	unsigned int offset = phys_addr & ~PAGE_MASK;
144524ed5d2cSNiklas Söderlund 	size_t len = PAGE_ALIGN(size + offset);
144624ed5d2cSNiklas Söderlund 
144724ed5d2cSNiklas Söderlund 	dma_addr = __alloc_iova(mapping, len);
144872fd97bfSChristoph Hellwig 	if (dma_addr == DMA_MAPPING_ERROR)
144924ed5d2cSNiklas Söderlund 		return dma_addr;
145024ed5d2cSNiklas Söderlund 
14517d2822dfSSricharan R 	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
145224ed5d2cSNiklas Söderlund 
14531369459bSJason Gunthorpe 	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
145424ed5d2cSNiklas Söderlund 	if (ret < 0)
145524ed5d2cSNiklas Söderlund 		goto fail;
145624ed5d2cSNiklas Söderlund 
145724ed5d2cSNiklas Söderlund 	return dma_addr + offset;
145824ed5d2cSNiklas Söderlund fail:
145924ed5d2cSNiklas Söderlund 	__free_iova(mapping, dma_addr, len);
146072fd97bfSChristoph Hellwig 	return DMA_MAPPING_ERROR;
146124ed5d2cSNiklas Söderlund }
146224ed5d2cSNiklas Söderlund 
146324ed5d2cSNiklas Söderlund /**
146424ed5d2cSNiklas Söderlund  * arm_iommu_unmap_resource - unmap a device DMA resource
146524ed5d2cSNiklas Söderlund  * @dev: valid struct device pointer
146624ed5d2cSNiklas Söderlund  * @dma_handle: DMA address of resource
146724ed5d2cSNiklas Söderlund  * @size: size of resource to unmap
146824ed5d2cSNiklas Söderlund  * @dir: DMA transfer direction
146924ed5d2cSNiklas Söderlund  */
147024ed5d2cSNiklas Söderlund static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
147124ed5d2cSNiklas Söderlund 		size_t size, enum dma_data_direction dir,
147224ed5d2cSNiklas Söderlund 		unsigned long attrs)
147324ed5d2cSNiklas Söderlund {
147424ed5d2cSNiklas Söderlund 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
147524ed5d2cSNiklas Söderlund 	dma_addr_t iova = dma_handle & PAGE_MASK;
147624ed5d2cSNiklas Söderlund 	unsigned int offset = dma_handle & ~PAGE_MASK;
147724ed5d2cSNiklas Söderlund 	size_t len = PAGE_ALIGN(size + offset);
147824ed5d2cSNiklas Söderlund 
147924ed5d2cSNiklas Söderlund 	if (!iova)
148024ed5d2cSNiklas Söderlund 		return;
148124ed5d2cSNiklas Söderlund 
148224ed5d2cSNiklas Söderlund 	iommu_unmap(mapping->domain, iova, len);
148324ed5d2cSNiklas Söderlund 	__free_iova(mapping, iova, len);
148424ed5d2cSNiklas Söderlund }
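/*
 * Illustrative sketch: dma_map_resource() is the entry point that reaches
 * the two helpers above, e.g. for device-to-device DMA into an MMIO FIFO
 * whose physical address "fifo_phys" is assumed known to the caller:
 *
 *      dma_addr_t dma = dma_map_resource(dev, fifo_phys, SZ_4K,
 *                                        DMA_BIDIRECTIONAL, 0);
 *
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
 */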
148524ed5d2cSNiklas Söderlund 
14864ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_cpu(struct device *dev,
14874ce63fcdSMarek Szyprowski 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
14884ce63fcdSMarek Szyprowski {
148989cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
14904ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
14914136ce90SRobin Murphy 	struct page *page;
14924ce63fcdSMarek Szyprowski 	unsigned int offset = handle & ~PAGE_MASK;
14934ce63fcdSMarek Szyprowski 
14944136ce90SRobin Murphy 	if (dev->dma_coherent || !iova)
14954ce63fcdSMarek Szyprowski 		return;
14964ce63fcdSMarek Szyprowski 
14974136ce90SRobin Murphy 	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
14984ce63fcdSMarek Szyprowski 	__dma_page_dev_to_cpu(page, offset, size, dir);
14994ce63fcdSMarek Szyprowski }
15004ce63fcdSMarek Szyprowski 
15014ce63fcdSMarek Szyprowski static void arm_iommu_sync_single_for_device(struct device *dev,
15024ce63fcdSMarek Szyprowski 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
15034ce63fcdSMarek Szyprowski {
150489cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
15054ce63fcdSMarek Szyprowski 	dma_addr_t iova = handle & PAGE_MASK;
15064136ce90SRobin Murphy 	struct page *page;
15074ce63fcdSMarek Szyprowski 	unsigned int offset = handle & ~PAGE_MASK;
15084ce63fcdSMarek Szyprowski 
15094136ce90SRobin Murphy 	if (dev->dma_coherent || !iova)
15104ce63fcdSMarek Szyprowski 		return;
15114ce63fcdSMarek Szyprowski 
15124136ce90SRobin Murphy 	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
15134ce63fcdSMarek Szyprowski 	__dma_page_cpu_to_dev(page, offset, size, dir);
15144ce63fcdSMarek Szyprowski }
15154ce63fcdSMarek Szyprowski 
151617fe8684SBen Dooks static const struct dma_map_ops iommu_ops = {
15174ce63fcdSMarek Szyprowski 	.alloc		= arm_iommu_alloc_attrs,
15184ce63fcdSMarek Szyprowski 	.free		= arm_iommu_free_attrs,
15194ce63fcdSMarek Szyprowski 	.mmap		= arm_iommu_mmap_attrs,
1520dc2832e1SMarek Szyprowski 	.get_sgtable	= arm_iommu_get_sgtable,
15214ce63fcdSMarek Szyprowski 
15224ce63fcdSMarek Szyprowski 	.map_page		= arm_iommu_map_page,
15234ce63fcdSMarek Szyprowski 	.unmap_page		= arm_iommu_unmap_page,
15244ce63fcdSMarek Szyprowski 	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
15254ce63fcdSMarek Szyprowski 	.sync_single_for_device	= arm_iommu_sync_single_for_device,
15264ce63fcdSMarek Szyprowski 
15274ce63fcdSMarek Szyprowski 	.map_sg			= arm_iommu_map_sg,
15284ce63fcdSMarek Szyprowski 	.unmap_sg		= arm_iommu_unmap_sg,
15294ce63fcdSMarek Szyprowski 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
15304ce63fcdSMarek Szyprowski 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
153124ed5d2cSNiklas Söderlund 
153224ed5d2cSNiklas Söderlund 	.map_resource		= arm_iommu_map_resource,
153324ed5d2cSNiklas Söderlund 	.unmap_resource		= arm_iommu_unmap_resource,
15340fa478dfSRob Herring };
15350fa478dfSRob Herring 
15364ce63fcdSMarek Szyprowski /**
15374ce63fcdSMarek Szyprowski  * arm_iommu_create_mapping
15384ce63fcdSMarek Szyprowski  * @bus: pointer to the bus holding the client device (for IOMMU calls)
15394ce63fcdSMarek Szyprowski  * @base: start address of the valid IO address space
154068efd7d2SMarek Szyprowski  * @size: maximum size of the valid IO address space
15414ce63fcdSMarek Szyprowski  *
15424ce63fcdSMarek Szyprowski  * Creates a mapping structure which holds information about used/unused
15434ce63fcdSMarek Szyprowski  * IO address ranges; this is required to perform memory allocation and
15444ce63fcdSMarek Szyprowski  * mapping with the IOMMU-aware functions.
15454ce63fcdSMarek Szyprowski  *
15464ce63fcdSMarek Szyprowski  * The client device needs to be attached to the mapping with the
15474ce63fcdSMarek Szyprowski  * arm_iommu_attach_device() function.
15484ce63fcdSMarek Szyprowski  */
15494ce63fcdSMarek Szyprowski struct dma_iommu_mapping *
1550a3ea9fbcSGreg Kroah-Hartman arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
15514ce63fcdSMarek Szyprowski {
155268efd7d2SMarek Szyprowski 	unsigned int bits = size >> PAGE_SHIFT;
155368efd7d2SMarek Szyprowski 	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
15544ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping;
155568efd7d2SMarek Szyprowski 	int extensions = 1;
15564ce63fcdSMarek Szyprowski 	int err = -ENOMEM;
15574ce63fcdSMarek Szyprowski 
15581424532bSMarek Szyprowski 	/* currently only 32-bit DMA address space is supported */
15591424532bSMarek Szyprowski 	if (size > DMA_BIT_MASK(32) + 1)
15601424532bSMarek Szyprowski 		return ERR_PTR(-ERANGE);
15611424532bSMarek Szyprowski 
156268efd7d2SMarek Szyprowski 	if (!bitmap_size)
15634ce63fcdSMarek Szyprowski 		return ERR_PTR(-EINVAL);
15644ce63fcdSMarek Szyprowski 
156568efd7d2SMarek Szyprowski 	if (bitmap_size > PAGE_SIZE) {
156668efd7d2SMarek Szyprowski 		extensions = bitmap_size / PAGE_SIZE;
156768efd7d2SMarek Szyprowski 		bitmap_size = PAGE_SIZE;
156868efd7d2SMarek Szyprowski 	}
156968efd7d2SMarek Szyprowski 
15704ce63fcdSMarek Szyprowski 	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
15714ce63fcdSMarek Szyprowski 	if (!mapping)
15724ce63fcdSMarek Szyprowski 		goto err;
15734ce63fcdSMarek Szyprowski 
157468efd7d2SMarek Szyprowski 	mapping->bitmap_size = bitmap_size;
15756396bb22SKees Cook 	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
15764d852ef8SAndreas Herrmann 				   GFP_KERNEL);
15774d852ef8SAndreas Herrmann 	if (!mapping->bitmaps)
15784ce63fcdSMarek Szyprowski 		goto err2;
15794ce63fcdSMarek Szyprowski 
158068efd7d2SMarek Szyprowski 	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
15814d852ef8SAndreas Herrmann 	if (!mapping->bitmaps[0])
15824d852ef8SAndreas Herrmann 		goto err3;
15834d852ef8SAndreas Herrmann 
15844d852ef8SAndreas Herrmann 	mapping->nr_bitmaps = 1;
15854d852ef8SAndreas Herrmann 	mapping->extensions = extensions;
15864ce63fcdSMarek Szyprowski 	mapping->base = base;
158768efd7d2SMarek Szyprowski 	mapping->bits = BITS_PER_BYTE * bitmap_size;
15884d852ef8SAndreas Herrmann 
15894ce63fcdSMarek Szyprowski 	spin_lock_init(&mapping->lock);
15904ce63fcdSMarek Szyprowski 
15914ce63fcdSMarek Szyprowski 	mapping->domain = iommu_domain_alloc(bus);
15924ce63fcdSMarek Szyprowski 	if (!mapping->domain)
15934d852ef8SAndreas Herrmann 		goto err4;
15944ce63fcdSMarek Szyprowski 
15954ce63fcdSMarek Szyprowski 	kref_init(&mapping->kref);
15964ce63fcdSMarek Szyprowski 	return mapping;
15974d852ef8SAndreas Herrmann err4:
15984d852ef8SAndreas Herrmann 	kfree(mapping->bitmaps[0]);
15994ce63fcdSMarek Szyprowski err3:
16004d852ef8SAndreas Herrmann 	kfree(mapping->bitmaps);
16014ce63fcdSMarek Szyprowski err2:
16024ce63fcdSMarek Szyprowski 	kfree(mapping);
16034ce63fcdSMarek Szyprowski err:
16044ce63fcdSMarek Szyprowski 	return ERR_PTR(err);
16054ce63fcdSMarek Szyprowski }
160618177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
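/*
 * Illustrative sketch (the values are arbitrary): create a 128 MiB IOVA
 * window starting at 0x10000000 for a device on the platform bus; errors
 * follow the ERR_PTR() convention used above:
 *
 *      struct dma_iommu_mapping *mapping;
 *
 *      mapping = arm_iommu_create_mapping(&platform_bus_type,
 *                                         0x10000000, SZ_128M);
 *      if (IS_ERR(mapping))
 *              return PTR_ERR(mapping);
 */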
16074ce63fcdSMarek Szyprowski 
16084ce63fcdSMarek Szyprowski static void release_iommu_mapping(struct kref *kref)
16094ce63fcdSMarek Szyprowski {
16104d852ef8SAndreas Herrmann 	int i;
16114ce63fcdSMarek Szyprowski 	struct dma_iommu_mapping *mapping =
16124ce63fcdSMarek Szyprowski 		container_of(kref, struct dma_iommu_mapping, kref);
16134ce63fcdSMarek Szyprowski 
16144ce63fcdSMarek Szyprowski 	iommu_domain_free(mapping->domain);
16154d852ef8SAndreas Herrmann 	for (i = 0; i < mapping->nr_bitmaps; i++)
16164d852ef8SAndreas Herrmann 		kfree(mapping->bitmaps[i]);
16174d852ef8SAndreas Herrmann 	kfree(mapping->bitmaps);
16184ce63fcdSMarek Szyprowski 	kfree(mapping);
16194ce63fcdSMarek Szyprowski }
16204ce63fcdSMarek Szyprowski 
16214d852ef8SAndreas Herrmann static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
16224d852ef8SAndreas Herrmann {
16234d852ef8SAndreas Herrmann 	int next_bitmap;
16244d852ef8SAndreas Herrmann 
1625462859aaSMarek Szyprowski 	if (mapping->nr_bitmaps >= mapping->extensions)
16264d852ef8SAndreas Herrmann 		return -EINVAL;
16274d852ef8SAndreas Herrmann 
16284d852ef8SAndreas Herrmann 	next_bitmap = mapping->nr_bitmaps;
16294d852ef8SAndreas Herrmann 	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
16304d852ef8SAndreas Herrmann 						GFP_ATOMIC);
16314d852ef8SAndreas Herrmann 	if (!mapping->bitmaps[next_bitmap])
16324d852ef8SAndreas Herrmann 		return -ENOMEM;
16334d852ef8SAndreas Herrmann 
16344d852ef8SAndreas Herrmann 	mapping->nr_bitmaps++;
16354d852ef8SAndreas Herrmann 
16364d852ef8SAndreas Herrmann 	return 0;
16374d852ef8SAndreas Herrmann }
16384d852ef8SAndreas Herrmann 
16394ce63fcdSMarek Szyprowski void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
16404ce63fcdSMarek Szyprowski {
16414ce63fcdSMarek Szyprowski 	if (mapping)
16424ce63fcdSMarek Szyprowski 		kref_put(&mapping->kref, release_iommu_mapping);
16434ce63fcdSMarek Szyprowski }
164418177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
16454ce63fcdSMarek Szyprowski 
1646eab8d653SLaurent Pinchart static int __arm_iommu_attach_device(struct device *dev,
16474ce63fcdSMarek Szyprowski 				     struct dma_iommu_mapping *mapping)
16484ce63fcdSMarek Szyprowski {
16494ce63fcdSMarek Szyprowski 	int err;
16504ce63fcdSMarek Szyprowski 
16514ce63fcdSMarek Szyprowski 	err = iommu_attach_device(mapping->domain, dev);
16524ce63fcdSMarek Szyprowski 	if (err)
16534ce63fcdSMarek Szyprowski 		return err;
16544ce63fcdSMarek Szyprowski 
16554ce63fcdSMarek Szyprowski 	kref_get(&mapping->kref);
165689cfdb19SWill Deacon 	to_dma_iommu_mapping(dev) = mapping;
16574ce63fcdSMarek Szyprowski 
165875c59716SHiroshi Doyu 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
16594ce63fcdSMarek Szyprowski 	return 0;
16604ce63fcdSMarek Szyprowski }
16614ce63fcdSMarek Szyprowski 
16626fe36758SHiroshi Doyu /**
1663eab8d653SLaurent Pinchart  * arm_iommu_attach_device
16646fe36758SHiroshi Doyu  * @dev: valid struct device pointer
1665eab8d653SLaurent Pinchart  * @mapping: io address space mapping structure (returned from
1666eab8d653SLaurent Pinchart  *	arm_iommu_create_mapping)
16676fe36758SHiroshi Doyu  *
1668eab8d653SLaurent Pinchart  * Attaches the specified IO address space mapping to the provided device.
1669eab8d653SLaurent Pinchart  * This replaces the DMA operations (dma_map_ops pointer) with the
1670eab8d653SLaurent Pinchart  * IOMMU-aware version.
1671eab8d653SLaurent Pinchart  *
1672eab8d653SLaurent Pinchart  * More than one client might be attached to the same io address space
1673eab8d653SLaurent Pinchart  * mapping.
16746fe36758SHiroshi Doyu  */
1675eab8d653SLaurent Pinchart int arm_iommu_attach_device(struct device *dev,
1676eab8d653SLaurent Pinchart 			    struct dma_iommu_mapping *mapping)
1677eab8d653SLaurent Pinchart {
1678eab8d653SLaurent Pinchart 	int err;
1679eab8d653SLaurent Pinchart 
1680eab8d653SLaurent Pinchart 	err = __arm_iommu_attach_device(dev, mapping);
1681eab8d653SLaurent Pinchart 	if (err)
1682eab8d653SLaurent Pinchart 		return err;
1683eab8d653SLaurent Pinchart 
1684eab8d653SLaurent Pinchart 	set_dma_ops(dev, &iommu_ops);
1685eab8d653SLaurent Pinchart 	return 0;
1686eab8d653SLaurent Pinchart }
1687eab8d653SLaurent Pinchart EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
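/*
 * Illustrative continuation of the sketch above ("dev" and "mapping" as
 * before): attaching installs the IOMMU-aware dma_map_ops for the device.
 *
 *      err = arm_iommu_attach_device(dev, mapping);
 *      if (err) {
 *              arm_iommu_release_mapping(mapping);
 *              return err;
 *      }
 *
 * From here on, dma_map_*()/dma_alloc_*() calls on "dev" are routed
 * through the iommu_ops table defined earlier in this file.
 */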
1688eab8d653SLaurent Pinchart 
1689d3e01c51SSricharan R /**
1690d3e01c51SSricharan R  * arm_iommu_detach_device
1691d3e01c51SSricharan R  * @dev: valid struct device pointer
1692d3e01c51SSricharan R  *
1693d3e01c51SSricharan R  * Detaches the provided device from a previously attached map.
16944a4d68fcSWolfram Sang (Renesas)  * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
1695d3e01c51SSricharan R  */
1696d3e01c51SSricharan R void arm_iommu_detach_device(struct device *dev)
16976fe36758SHiroshi Doyu {
16986fe36758SHiroshi Doyu 	struct dma_iommu_mapping *mapping;
16996fe36758SHiroshi Doyu 
17006fe36758SHiroshi Doyu 	mapping = to_dma_iommu_mapping(dev);
17016fe36758SHiroshi Doyu 	if (!mapping) {
17026fe36758SHiroshi Doyu 		dev_warn(dev, "Not attached\n");
17036fe36758SHiroshi Doyu 		return;
17046fe36758SHiroshi Doyu 	}
17056fe36758SHiroshi Doyu 
17066fe36758SHiroshi Doyu 	iommu_detach_device(mapping->domain, dev);
17076fe36758SHiroshi Doyu 	kref_put(&mapping->kref, release_iommu_mapping);
170889cfdb19SWill Deacon 	to_dma_iommu_mapping(dev) = NULL;
1709ae626eb9SChristoph Hellwig 	set_dma_ops(dev, NULL);
17106fe36758SHiroshi Doyu 
17116fe36758SHiroshi Doyu 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
17126fe36758SHiroshi Doyu }
171318177d12SPrathyush K EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
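/*
 * Illustrative teardown sketch, undoing the create/attach pair shown
 * above:
 *
 *      arm_iommu_detach_device(dev);
 *      arm_iommu_release_mapping(mapping);
 *
 * The mapping is reference counted, so it is only freed once both the
 * per-attachment reference and the creator's reference have been dropped.
 */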
17146fe36758SHiroshi Doyu 
1715ae626eb9SChristoph Hellwig static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
1716ae626eb9SChristoph Hellwig 				    const struct iommu_ops *iommu, bool coherent)
17174bb25789SWill Deacon {
17184bb25789SWill Deacon 	struct dma_iommu_mapping *mapping;
17194bb25789SWill Deacon 
17204bb25789SWill Deacon 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
17214bb25789SWill Deacon 	if (IS_ERR(mapping)) {
17224bb25789SWill Deacon 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
17234bb25789SWill Deacon 				size, dev_name(dev));
1724ae626eb9SChristoph Hellwig 		return;
17254bb25789SWill Deacon 	}
17264bb25789SWill Deacon 
1727eab8d653SLaurent Pinchart 	if (__arm_iommu_attach_device(dev, mapping)) {
17284bb25789SWill Deacon 		pr_warn("Failed to attach device %s to IOMMU mapping\n",
17294bb25789SWill Deacon 				dev_name(dev));
17304bb25789SWill Deacon 		arm_iommu_release_mapping(mapping);
1731ae626eb9SChristoph Hellwig 		return;
17324bb25789SWill Deacon 	}
17334bb25789SWill Deacon 
1734ae626eb9SChristoph Hellwig 	set_dma_ops(dev, &iommu_ops);
17354bb25789SWill Deacon }
17364bb25789SWill Deacon 
17374bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev)
17384bb25789SWill Deacon {
173989cfdb19SWill Deacon 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
17404bb25789SWill Deacon 
1741c2273a18SWill Deacon 	if (!mapping)
1742c2273a18SWill Deacon 		return;
1743c2273a18SWill Deacon 
1744d3e01c51SSricharan R 	arm_iommu_detach_device(dev);
17454bb25789SWill Deacon 	arm_iommu_release_mapping(mapping);
17464bb25789SWill Deacon }
17474bb25789SWill Deacon 
17484bb25789SWill Deacon #else
17494bb25789SWill Deacon 
1750ae626eb9SChristoph Hellwig static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
1751ae626eb9SChristoph Hellwig 				    const struct iommu_ops *iommu, bool coherent)
17524bb25789SWill Deacon {
17534bb25789SWill Deacon }
17544bb25789SWill Deacon 
17554bb25789SWill Deacon static void arm_teardown_iommu_dma_ops(struct device *dev) { }
17564bb25789SWill Deacon 
17574bb25789SWill Deacon #endif	/* CONFIG_ARM_DMA_USE_IOMMU */
17584bb25789SWill Deacon 
17594bb25789SWill Deacon void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
176053c92d79SRobin Murphy 			const struct iommu_ops *iommu, bool coherent)
17614bb25789SWill Deacon {
176249bc8bebSChristoph Hellwig 	/*
176349bc8bebSChristoph Hellwig 	 * Due to legacy code that sets the ->dma_coherent flag from a bus
176449bc8bebSChristoph Hellwig 	 * notifier we can't just assign coherent to the ->dma_coherent flag
176549bc8bebSChristoph Hellwig 	 * here, but instead have to make sure we only set but never clear it
176649bc8bebSChristoph Hellwig 	 * for now.
176749bc8bebSChristoph Hellwig 	 */
1768c9cb0136SChristoph Hellwig 	if (coherent)
176949bc8bebSChristoph Hellwig 		dev->dma_coherent = true;
177026b37b94SLaurent Pinchart 
177126b37b94SLaurent Pinchart 	/*
177226b37b94SLaurent Pinchart 	 * Don't override the dma_ops if they have already been set. Ideally
177326b37b94SLaurent Pinchart 	 * this should be the only location where dma_ops are set, remove this
177426b37b94SLaurent Pinchart 	 * check when all other callers of set_dma_ops will have disappeared.
177526b37b94SLaurent Pinchart 	 */
177626b37b94SLaurent Pinchart 	if (dev->dma_ops)
177726b37b94SLaurent Pinchart 		return;
177826b37b94SLaurent Pinchart 
1779ae626eb9SChristoph Hellwig 	if (iommu)
1780ae626eb9SChristoph Hellwig 		arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
1781e0586326SStefano Stabellini 
17829bf22421SOleksandr Tyshchenko 	xen_setup_dma_ops(dev);
1783a93a121aSLaurent Pinchart 	dev->archdata.dma_ops_setup = true;
17844bb25789SWill Deacon }
17854bb25789SWill Deacon 
17864bb25789SWill Deacon void arch_teardown_dma_ops(struct device *dev)
17874bb25789SWill Deacon {
1788a93a121aSLaurent Pinchart 	if (!dev->archdata.dma_ops_setup)
1789a93a121aSLaurent Pinchart 		return;
1790a93a121aSLaurent Pinchart 
17914bb25789SWill Deacon 	arm_teardown_iommu_dma_ops(dev);
1792fc67e6f1SRobin Murphy 	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
1793fc67e6f1SRobin Murphy 	set_dma_ops(dev, NULL);
17944bb25789SWill Deacon }
1795ad3c7b18SChristoph Hellwig 
179656e35f9cSChristoph Hellwig void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
179756e35f9cSChristoph Hellwig 		enum dma_data_direction dir)
1798ad3c7b18SChristoph Hellwig {
1799ad3c7b18SChristoph Hellwig 	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
1800ad3c7b18SChristoph Hellwig 			      size, dir);
1801ad3c7b18SChristoph Hellwig }
1802ad3c7b18SChristoph Hellwig 
180356e35f9cSChristoph Hellwig void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
180456e35f9cSChristoph Hellwig 		enum dma_data_direction dir)
1805ad3c7b18SChristoph Hellwig {
1806ad3c7b18SChristoph Hellwig 	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
1807ad3c7b18SChristoph Hellwig 			      size, dir);
1808ad3c7b18SChristoph Hellwig }
1809ad3c7b18SChristoph Hellwig 
1810ad3c7b18SChristoph Hellwig void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
1811ad3c7b18SChristoph Hellwig 		gfp_t gfp, unsigned long attrs)
1812ad3c7b18SChristoph Hellwig {
1813ad3c7b18SChristoph Hellwig 	return __dma_alloc(dev, size, dma_handle, gfp,
1814ad3c7b18SChristoph Hellwig 			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
1815ad3c7b18SChristoph Hellwig 			   attrs, __builtin_return_address(0));
1816ad3c7b18SChristoph Hellwig }
1817ad3c7b18SChristoph Hellwig 
1818ad3c7b18SChristoph Hellwig void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
1819ad3c7b18SChristoph Hellwig 		dma_addr_t dma_handle, unsigned long attrs)
1820ad3c7b18SChristoph Hellwig {
1821ad3c7b18SChristoph Hellwig 	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
1822ad3c7b18SChristoph Hellwig }
1823