xref: /openbmc/linux/arch/arm/mm/dma-mapping.c (revision 2fa5ebe3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/arch/arm/mm/dma-mapping.c
4  *
5  *  Copyright (C) 2000-2004 Russell King
6  *
7  *  DMA uncached mapping support.
8  */
9 #include <linux/module.h>
10 #include <linux/mm.h>
11 #include <linux/genalloc.h>
12 #include <linux/gfp.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/device.h>
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
19 #include <linux/highmem.h>
20 #include <linux/memblock.h>
21 #include <linux/slab.h>
22 #include <linux/iommu.h>
23 #include <linux/io.h>
24 #include <linux/vmalloc.h>
25 #include <linux/sizes.h>
26 #include <linux/cma.h>
27 
28 #include <asm/memory.h>
29 #include <asm/highmem.h>
30 #include <asm/cacheflush.h>
31 #include <asm/tlbflush.h>
32 #include <asm/mach/arch.h>
33 #include <asm/dma-iommu.h>
34 #include <asm/mach/map.h>
35 #include <asm/system_info.h>
36 #include <asm/xen/xen-ops.h>
37 
38 #include "dma.h"
39 #include "mm.h"
40 
41 struct arm_dma_alloc_args {
42 	struct device *dev;
43 	size_t size;
44 	gfp_t gfp;
45 	pgprot_t prot;
46 	const void *caller;
47 	bool want_vaddr;
48 	int coherent_flag;
49 };
50 
51 struct arm_dma_free_args {
52 	struct device *dev;
53 	size_t size;
54 	void *cpu_addr;
55 	struct page *page;
56 	bool want_vaddr;
57 };
58 
59 #define NORMAL	    0
60 #define COHERENT    1
61 
62 struct arm_dma_allocator {
63 	void *(*alloc)(struct arm_dma_alloc_args *args,
64 		       struct page **ret_page);
65 	void (*free)(struct arm_dma_free_args *args);
66 };
67 
68 struct arm_dma_buffer {
69 	struct list_head list;
70 	void *virt;
71 	struct arm_dma_allocator *allocator;
72 };
73 
74 static LIST_HEAD(arm_dma_bufs);
75 static DEFINE_SPINLOCK(arm_dma_bufs_lock);
76 
77 static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
78 {
79 	struct arm_dma_buffer *buf, *found = NULL;
80 	unsigned long flags;
81 
82 	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
83 	list_for_each_entry(buf, &arm_dma_bufs, list) {
84 		if (buf->virt == virt) {
85 			list_del(&buf->list);
86 			found = buf;
87 			break;
88 		}
89 	}
90 	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
91 	return found;
92 }
93 
94 /*
95  * The DMA API is built upon the notion of "buffer ownership".  A buffer
96  * is either exclusively owned by the CPU (and therefore may be accessed
97  * by it) or exclusively owned by the DMA device.  These helper functions
98  * represent the transitions between these two ownership states.
99  *
100  * Note, however, that on later ARMs, this notion does not work due to
101  * speculative prefetches.  We model our approach on the assumption that
102  * the CPU does do speculative prefetches, which means we clean caches
103  * before transfers and delay cache invalidation until transfer completion.
104  *
105  */
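
/*
 * Editor's sketch (not part of this file): the ownership protocol as a
 * driver sees it through the generic DMA API; "dev", "buf" and "len"
 * are placeholders.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// the device owns the buffer: the CPU must not touch it now
 *	...start the transfer and wait for it to complete...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *	// ownership has transferred back to the CPU
 */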
106 
107 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
108 {
109 	/*
110 	 * Ensure that the allocated pages are zeroed, and that any data
111 	 * lurking in the kernel direct-mapped region is invalidated.
112 	 */
113 	if (PageHighMem(page)) {
114 		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
115 		phys_addr_t end = base + size;
116 		while (size > 0) {
117 			void *ptr = kmap_atomic(page);
118 			memset(ptr, 0, PAGE_SIZE);
119 			if (coherent_flag != COHERENT)
120 				dmac_flush_range(ptr, ptr + PAGE_SIZE);
121 			kunmap_atomic(ptr);
122 			page++;
123 			size -= PAGE_SIZE;
124 		}
125 		if (coherent_flag != COHERENT)
126 			outer_flush_range(base, end);
127 	} else {
128 		void *ptr = page_address(page);
129 		memset(ptr, 0, size);
130 		if (coherent_flag != COHERENT) {
131 			dmac_flush_range(ptr, ptr + size);
132 			outer_flush_range(__pa(ptr), __pa(ptr) + size);
133 		}
134 	}
135 }
136 
137 /*
138  * Allocate a DMA buffer for 'dev' of size 'size' using the
139  * specified gfp mask.  Note that 'size' must be page aligned.
140  */
141 static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
142 				       gfp_t gfp, int coherent_flag)
143 {
144 	unsigned long order = get_order(size);
145 	struct page *page, *p, *e;
146 
147 	page = alloc_pages(gfp, order);
148 	if (!page)
149 		return NULL;
150 
151 	/*
152 	 * Now split the huge page and free the excess pages
153 	 */
154 	split_page(page, order);
155 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
156 		__free_page(p);
157 
158 	__dma_clear_buffer(page, size, coherent_flag);
159 
160 	return page;
161 }
162 
163 /*
164  * Free a DMA buffer.  'size' must be page aligned.
165  */
166 static void __dma_free_buffer(struct page *page, size_t size)
167 {
168 	struct page *e = page + (size >> PAGE_SHIFT);
169 
170 	while (page < e) {
171 		__free_page(page);
172 		page++;
173 	}
174 }
175 
176 static void *__alloc_from_contiguous(struct device *dev, size_t size,
177 				     pgprot_t prot, struct page **ret_page,
178 				     const void *caller, bool want_vaddr,
179 				     int coherent_flag, gfp_t gfp);
180 
181 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
182 				 pgprot_t prot, struct page **ret_page,
183 				 const void *caller, bool want_vaddr);
184 
185 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
186 static struct gen_pool *atomic_pool __ro_after_init;
187 
188 static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
189 
190 static int __init early_coherent_pool(char *p)
191 {
192 	atomic_pool_size = memparse(p, &p);
193 	return 0;
194 }
195 early_param("coherent_pool", early_coherent_pool);
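
/*
 * Editor's note: "coherent_pool" is a kernel command line parameter,
 * and memparse() accepts the usual K/M/G suffixes, so booting with
 * e.g. "coherent_pool=1M" grows the atomic pool from the default
 * 256 KiB to 1 MiB.
 */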
196 
197 /*
198  * Initialise the coherent pool for atomic allocations.
199  */
200 static int __init atomic_pool_init(void)
201 {
202 	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
203 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
204 	struct page *page;
205 	void *ptr;
206 
207 	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
208 	if (!atomic_pool)
209 		goto out;
210 	/*
211 	 * The atomic pool is only used for non-coherent allocations
212 	 * so we must pass NORMAL for coherent_flag.
213 	 */
214 	if (dev_get_cma_area(NULL))
215 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
216 				      &page, atomic_pool_init, true, NORMAL,
217 				      GFP_KERNEL);
218 	else
219 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
220 					   &page, atomic_pool_init, true);
221 	if (ptr) {
222 		int ret;
223 
224 		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
225 					page_to_phys(page),
226 					atomic_pool_size, -1);
227 		if (ret)
228 			goto destroy_genpool;
229 
230 		gen_pool_set_algo(atomic_pool,
231 				gen_pool_first_fit_order_align,
232 				NULL);
233 		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
234 		       atomic_pool_size / 1024);
235 		return 0;
236 	}
237 
238 destroy_genpool:
239 	gen_pool_destroy(atomic_pool);
240 	atomic_pool = NULL;
241 out:
242 	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
243 	       atomic_pool_size / 1024);
244 	return -ENOMEM;
245 }
246 /*
247  * CMA is activated by core_initcall, so we must be called after it.
248  */
249 postcore_initcall(atomic_pool_init);
250 
251 #ifdef CONFIG_CMA_AREAS
252 struct dma_contig_early_reserve {
253 	phys_addr_t base;
254 	unsigned long size;
255 };
256 
257 static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
258 
259 static int dma_mmu_remap_num __initdata;
260 
261 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
262 {
263 	dma_mmu_remap[dma_mmu_remap_num].base = base;
264 	dma_mmu_remap[dma_mmu_remap_num].size = size;
265 	dma_mmu_remap_num++;
266 }
267 
268 void __init dma_contiguous_remap(void)
269 {
270 	int i;
271 	for (i = 0; i < dma_mmu_remap_num; i++) {
272 		phys_addr_t start = dma_mmu_remap[i].base;
273 		phys_addr_t end = start + dma_mmu_remap[i].size;
274 		struct map_desc map;
275 		unsigned long addr;
276 
277 		if (end > arm_lowmem_limit)
278 			end = arm_lowmem_limit;
279 		if (start >= end)
280 			continue;
281 
282 		map.pfn = __phys_to_pfn(start);
283 		map.virtual = __phys_to_virt(start);
284 		map.length = end - start;
285 		map.type = MT_MEMORY_DMA_READY;
286 
287 		/*
288 		 * Clear previous low-memory mapping to ensure that the
289 		 * TLB does not see any conflicting entries, then flush
290 		 * the TLB of the old entries before creating new mappings.
291 		 *
292 		 * This ensures that any speculatively loaded TLB entries
293 		 * (even though they may be rare) cannot cause any problems,
294 		 * and ensures that this code is architecturally compliant.
295 		 */
296 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
297 		     addr += PMD_SIZE)
298 			pmd_clear(pmd_off_k(addr));
299 
300 		flush_tlb_kernel_range(__phys_to_virt(start),
301 				       __phys_to_virt(end));
302 
303 		iotable_init(&map, 1);
304 	}
305 }
306 #endif
307 
308 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
309 {
310 	struct page *page = virt_to_page((void *)addr);
311 	pgprot_t prot = *(pgprot_t *)data;
312 
313 	set_pte_ext(pte, mk_pte(page, prot), 0);
314 	return 0;
315 }
316 
317 static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
318 {
319 	unsigned long start = (unsigned long) page_address(page);
320 	unsigned end = start + size;
321 	unsigned long end = start + size;
322 	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
323 	flush_tlb_kernel_range(start, end);
324 }
325 
326 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
327 				 pgprot_t prot, struct page **ret_page,
328 				 const void *caller, bool want_vaddr)
329 {
330 	struct page *page;
331 	void *ptr = NULL;
332 	/*
333 	 * __alloc_remap_buffer is only called when the device is
334 	 * non-coherent
335 	 */
336 	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
337 	if (!page)
338 		return NULL;
339 	if (!want_vaddr)
340 		goto out;
341 
342 	ptr = dma_common_contiguous_remap(page, size, prot, caller);
343 	if (!ptr) {
344 		__dma_free_buffer(page, size);
345 		return NULL;
346 	}
347 
348  out:
349 	*ret_page = page;
350 	return ptr;
351 }
352 
353 static void *__alloc_from_pool(size_t size, struct page **ret_page)
354 {
355 	unsigned long val;
356 	void *ptr = NULL;
357 
358 	if (!atomic_pool) {
359 		WARN(1, "coherent pool not initialised!\n");
360 		return NULL;
361 	}
362 
363 	val = gen_pool_alloc(atomic_pool, size);
364 	if (val) {
365 		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
366 
367 		*ret_page = phys_to_page(phys);
368 		ptr = (void *)val;
369 	}
370 
371 	return ptr;
372 }
373 
374 static bool __in_atomic_pool(void *start, size_t size)
375 {
376 	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
377 }
378 
379 static int __free_from_pool(void *start, size_t size)
380 {
381 	if (!__in_atomic_pool(start, size))
382 		return 0;
383 
384 	gen_pool_free(atomic_pool, (unsigned long)start, size);
385 
386 	return 1;
387 }
388 
389 static void *__alloc_from_contiguous(struct device *dev, size_t size,
390 				     pgprot_t prot, struct page **ret_page,
391 				     const void *caller, bool want_vaddr,
392 				     int coherent_flag, gfp_t gfp)
393 {
394 	unsigned long order = get_order(size);
395 	size_t count = size >> PAGE_SHIFT;
396 	struct page *page;
397 	void *ptr = NULL;
398 
399 	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
400 	if (!page)
401 		return NULL;
402 
403 	__dma_clear_buffer(page, size, coherent_flag);
404 
405 	if (!want_vaddr)
406 		goto out;
407 
408 	if (PageHighMem(page)) {
409 		ptr = dma_common_contiguous_remap(page, size, prot, caller);
410 		if (!ptr) {
411 			dma_release_from_contiguous(dev, page, count);
412 			return NULL;
413 		}
414 	} else {
415 		__dma_remap(page, size, prot);
416 		ptr = page_address(page);
417 	}
418 
419  out:
420 	*ret_page = page;
421 	return ptr;
422 }
423 
424 static void __free_from_contiguous(struct device *dev, struct page *page,
425 				   void *cpu_addr, size_t size, bool want_vaddr)
426 {
427 	if (want_vaddr) {
428 		if (PageHighMem(page))
429 			dma_common_free_remap(cpu_addr, size);
430 		else
431 			__dma_remap(page, size, PAGE_KERNEL);
432 	}
433 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
434 }
435 
436 static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
437 {
438 	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
439 			pgprot_writecombine(prot) :
440 			pgprot_dmacoherent(prot);
441 	return prot;
442 }
443 
444 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
445 				   struct page **ret_page)
446 {
447 	struct page *page;
448 	/* __alloc_simple_buffer is only called when the device is coherent */
449 	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
450 	if (!page)
451 		return NULL;
452 
453 	*ret_page = page;
454 	return page_address(page);
455 }
456 
457 static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
458 				    struct page **ret_page)
459 {
460 	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
461 				     ret_page);
462 }
463 
464 static void simple_allocator_free(struct arm_dma_free_args *args)
465 {
466 	__dma_free_buffer(args->page, args->size);
467 }
468 
469 static struct arm_dma_allocator simple_allocator = {
470 	.alloc = simple_allocator_alloc,
471 	.free = simple_allocator_free,
472 };
473 
474 static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
475 				 struct page **ret_page)
476 {
477 	return __alloc_from_contiguous(args->dev, args->size, args->prot,
478 				       ret_page, args->caller,
479 				       args->want_vaddr, args->coherent_flag,
480 				       args->gfp);
481 }
482 
483 static void cma_allocator_free(struct arm_dma_free_args *args)
484 {
485 	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
486 			       args->size, args->want_vaddr);
487 }
488 
489 static struct arm_dma_allocator cma_allocator = {
490 	.alloc = cma_allocator_alloc,
491 	.free = cma_allocator_free,
492 };
493 
494 static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
495 				  struct page **ret_page)
496 {
497 	return __alloc_from_pool(args->size, ret_page);
498 }
499 
500 static void pool_allocator_free(struct arm_dma_free_args *args)
501 {
502 	__free_from_pool(args->cpu_addr, args->size);
503 }
504 
505 static struct arm_dma_allocator pool_allocator = {
506 	.alloc = pool_allocator_alloc,
507 	.free = pool_allocator_free,
508 };
509 
510 static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
511 				   struct page **ret_page)
512 {
513 	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
514 				    args->prot, ret_page, args->caller,
515 				    args->want_vaddr);
516 }
517 
518 static void remap_allocator_free(struct arm_dma_free_args *args)
519 {
520 	if (args->want_vaddr)
521 		dma_common_free_remap(args->cpu_addr, args->size);
522 
523 	__dma_free_buffer(args->page, args->size);
524 }
525 
526 static struct arm_dma_allocator remap_allocator = {
527 	.alloc = remap_allocator_alloc,
528 	.free = remap_allocator_free,
529 };
530 
531 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
532 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
533 			 unsigned long attrs, const void *caller)
534 {
535 	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
536 	struct page *page = NULL;
537 	void *addr;
538 	bool allowblock, cma;
539 	struct arm_dma_buffer *buf;
540 	struct arm_dma_alloc_args args = {
541 		.dev = dev,
542 		.size = PAGE_ALIGN(size),
543 		.gfp = gfp,
544 		.prot = prot,
545 		.caller = caller,
546 		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
547 		.coherent_flag = is_coherent ? COHERENT : NORMAL,
548 	};
549 
550 #ifdef CONFIG_DMA_API_DEBUG
551 	u64 limit = (mask + 1) & ~mask;
552 	if (limit && size >= limit) {
553 		dev_warn(dev, "coherent allocation too big (requested %#zx mask %#llx)\n",
554 			size, mask);
555 		return NULL;
556 	}
557 #endif
558 
559 	buf = kzalloc(sizeof(*buf),
560 		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
561 	if (!buf)
562 		return NULL;
563 
564 	if (mask < 0xffffffffULL)
565 		gfp |= GFP_DMA;
566 
567 	args.gfp = gfp;
568 
569 	*handle = DMA_MAPPING_ERROR;
570 	allowblock = gfpflags_allow_blocking(gfp);
571 	cma = allowblock ? dev_get_cma_area(dev) : NULL;
572 
573 	if (cma)
574 		buf->allocator = &cma_allocator;
575 	else if (is_coherent)
576 		buf->allocator = &simple_allocator;
577 	else if (allowblock)
578 		buf->allocator = &remap_allocator;
579 	else
580 		buf->allocator = &pool_allocator;
581 
582 	addr = buf->allocator->alloc(&args, &page);
583 
584 	if (page) {
585 		unsigned long flags;
586 
587 		*handle = phys_to_dma(dev, page_to_phys(page));
588 		buf->virt = args.want_vaddr ? addr : page;
589 
590 		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
591 		list_add(&buf->list, &arm_dma_bufs);
592 		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
593 	} else {
594 		kfree(buf);
595 	}
596 
597 	return args.want_vaddr ? addr : page;
598 }
599 
600 /*
601  * Free a buffer as defined by the above mapping.
602  */
603 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
604 			   dma_addr_t handle, unsigned long attrs,
605 			   bool is_coherent)
606 {
607 	struct page *page = phys_to_page(dma_to_phys(dev, handle));
608 	struct arm_dma_buffer *buf;
609 	struct arm_dma_free_args args = {
610 		.dev = dev,
611 		.size = PAGE_ALIGN(size),
612 		.cpu_addr = cpu_addr,
613 		.page = page,
614 		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
615 	};
616 
617 	buf = arm_dma_buffer_find(cpu_addr);
618 	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
619 		return;
620 
621 	buf->allocator->free(&args);
622 	kfree(buf);
623 }
624 
625 static void dma_cache_maint_page(struct page *page, unsigned long offset,
626 	size_t size, enum dma_data_direction dir,
627 	void (*op)(const void *, size_t, int))
628 {
629 	unsigned long pfn;
630 	size_t left = size;
631 
632 	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
633 	offset %= PAGE_SIZE;
634 
635 	/*
636 	 * A single sg entry may refer to multiple physically contiguous
637 	 * pages.  But we still need to process highmem pages individually.
638 	 * If highmem is not configured then the bulk of this loop gets
639 	 * optimized out.
640 	 */
641 	do {
642 		size_t len = left;
643 		void *vaddr;
644 
645 		page = pfn_to_page(pfn);
646 
647 		if (PageHighMem(page)) {
648 			if (len + offset > PAGE_SIZE)
649 				len = PAGE_SIZE - offset;
650 
651 			if (cache_is_vipt_nonaliasing()) {
652 				vaddr = kmap_atomic(page);
653 				op(vaddr + offset, len, dir);
654 				kunmap_atomic(vaddr);
655 			} else {
656 				vaddr = kmap_high_get(page);
657 				if (vaddr) {
658 					op(vaddr + offset, len, dir);
659 					kunmap_high(page);
660 				}
661 			}
662 		} else {
663 			vaddr = page_address(page) + offset;
664 			op(vaddr, len, dir);
665 		}
666 		offset = 0;
667 		pfn++;
668 		left -= len;
669 	} while (left);
670 }
671 
672 /*
673  * Make an area consistent for devices. Note: drivers should NOT use
674  * these helpers directly; use the DMA API (dma_sync_*) instead, as in
675  * the sketch after __dma_page_dev_to_cpu() below.
676  */
677 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
678 	size_t size, enum dma_data_direction dir)
679 {
680 	phys_addr_t paddr;
681 
682 	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
683 
684 	paddr = page_to_phys(page) + off;
685 	if (dir == DMA_FROM_DEVICE) {
686 		outer_inv_range(paddr, paddr + size);
687 	} else {
688 		outer_clean_range(paddr, paddr + size);
689 	}
690 	/* FIXME: non-speculating: flush on bidirectional mappings? */
691 }
692 
693 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
694 	size_t size, enum dma_data_direction dir)
695 {
696 	phys_addr_t paddr = page_to_phys(page) + off;
697 
698 	/* FIXME: non-speculating: not required */
699 	/* in any case, don't bother invalidating if DMA to device */
700 	if (dir != DMA_TO_DEVICE) {
701 		outer_inv_range(paddr, paddr + size);
702 
703 		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
704 	}
705 
706 	/*
707 	 * Mark the D-cache clean for these pages to avoid extra flushing.
708 	 */
709 	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
710 		unsigned long pfn;
711 		size_t left = size;
712 
713 		pfn = page_to_pfn(page) + off / PAGE_SIZE;
714 		off %= PAGE_SIZE;
715 		if (off) {
716 			pfn++;
717 			left -= PAGE_SIZE - off;
718 		}
719 		while (left >= PAGE_SIZE) {
720 			page = pfn_to_page(pfn++);
721 			set_bit(PG_dcache_clean, &page->flags);
722 			left -= PAGE_SIZE;
723 		}
724 	}
725 }
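
/*
 * Editor's sketch (not part of this file): how a driver reaches the two
 * helpers above through the streaming DMA API; "dev", "dma" and "len"
 * are placeholders for a mapping created with DMA_FROM_DEVICE.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	...the CPU may now read the received data...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	...the device may DMA into the buffer again...
 *
 * On non-coherent devices these calls land in arch_sync_dma_for_cpu()
 * and arch_sync_dma_for_device() at the bottom of this file, which wrap
 * __dma_page_dev_to_cpu() and __dma_page_cpu_to_dev() respectively.
 */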
726 
727 #ifdef CONFIG_ARM_DMA_USE_IOMMU
728 
729 static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
730 {
731 	int prot = 0;
732 
733 	if (attrs & DMA_ATTR_PRIVILEGED)
734 		prot |= IOMMU_PRIV;
735 
736 	switch (dir) {
737 	case DMA_BIDIRECTIONAL:
738 		return prot | IOMMU_READ | IOMMU_WRITE;
739 	case DMA_TO_DEVICE:
740 		return prot | IOMMU_READ;
741 	case DMA_FROM_DEVICE:
742 		return prot | IOMMU_WRITE;
743 	default:
744 		return prot;
745 	}
746 }
747 
748 /* IOMMU */
749 
750 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
751 
752 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
753 				      size_t size)
754 {
755 	unsigned int order = get_order(size);
756 	unsigned int align = 0;
757 	unsigned int count, start;
758 	size_t mapping_size = mapping->bits << PAGE_SHIFT;
759 	unsigned long flags;
760 	dma_addr_t iova;
761 	int i;
762 
763 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
764 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
765 
766 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
767 	align = (1 << order) - 1;
768 
769 	spin_lock_irqsave(&mapping->lock, flags);
770 	for (i = 0; i < mapping->nr_bitmaps; i++) {
771 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
772 				mapping->bits, 0, count, align);
773 
774 		if (start > mapping->bits)
775 			continue;
776 
777 		bitmap_set(mapping->bitmaps[i], start, count);
778 		break;
779 	}
780 
781 	/*
782 	 * No unused range found. Try to extend the existing mapping
783 	 * and perform a second attempt to reserve an IO virtual
784 	 * address range of 'size' bytes.
785 	 */
786 	if (i == mapping->nr_bitmaps) {
787 		if (extend_iommu_mapping(mapping)) {
788 			spin_unlock_irqrestore(&mapping->lock, flags);
789 			return DMA_MAPPING_ERROR;
790 		}
791 
792 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
793 				mapping->bits, 0, count, align);
794 
795 		if (start > mapping->bits) {
796 			spin_unlock_irqrestore(&mapping->lock, flags);
797 			return DMA_MAPPING_ERROR;
798 		}
799 
800 		bitmap_set(mapping->bitmaps[i], start, count);
801 	}
802 	spin_unlock_irqrestore(&mapping->lock, flags);
803 
804 	iova = mapping->base + (mapping_size * i);
805 	iova += start << PAGE_SHIFT;
806 
807 	return iova;
808 }
809 
810 static inline void __free_iova(struct dma_iommu_mapping *mapping,
811 			       dma_addr_t addr, size_t size)
812 {
813 	unsigned int start, count;
814 	size_t mapping_size = mapping->bits << PAGE_SHIFT;
815 	unsigned long flags;
816 	dma_addr_t bitmap_base;
817 	u32 bitmap_index;
818 
819 	if (!size)
820 		return;
821 
822 	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
823 	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
824 
825 	bitmap_base = mapping->base + mapping_size * bitmap_index;
826 
827 	start = (addr - bitmap_base) >> PAGE_SHIFT;
828 
829 	if (addr + size > bitmap_base + mapping_size) {
830 		/*
831 		 * The address range to be freed reaches into the iova
832 		 * range of the next bitmap. This should not happen as
833 		 * we don't allow this in __alloc_iova (at the
834 		 * moment).
835 		 */
836 		BUG();
837 	} else
838 		count = size >> PAGE_SHIFT;
839 
840 	spin_lock_irqsave(&mapping->lock, flags);
841 	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
842 	spin_unlock_irqrestore(&mapping->lock, flags);
843 }
844 
845 /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
846 static const int iommu_order_array[] = { 9, 8, 4, 0 };
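
/*
 * Editor's note: with 4K pages the orders above correspond to 2M, 1M,
 * 64K and 4K chunks. A 1M + 4K request (257 pages), for instance, is
 * typically satisfied by one order-8 block plus one order-0 page,
 * dropping to smaller orders only under allocation pressure.
 */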
847 
848 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
849 					  gfp_t gfp, unsigned long attrs,
850 					  int coherent_flag)
851 {
852 	struct page **pages;
853 	int count = size >> PAGE_SHIFT;
854 	int array_size = count * sizeof(struct page *);
855 	int i = 0;
856 	int order_idx = 0;
857 
858 	if (array_size <= PAGE_SIZE)
859 		pages = kzalloc(array_size, GFP_KERNEL);
860 	else
861 		pages = vzalloc(array_size);
862 	if (!pages)
863 		return NULL;
864 
865 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
867 		unsigned long order = get_order(size);
868 		struct page *page;
869 
870 		page = dma_alloc_from_contiguous(dev, count, order,
871 						 gfp & __GFP_NOWARN);
872 		if (!page)
873 			goto error;
874 
875 		__dma_clear_buffer(page, size, coherent_flag);
876 
877 		for (i = 0; i < count; i++)
878 			pages[i] = page + i;
879 
880 		return pages;
881 	}
882 
883 	/* Go straight to 4K chunks if caller says it's OK. */
884 	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
885 		order_idx = ARRAY_SIZE(iommu_order_array) - 1;
886 
887 	/*
888 	 * IOMMU can map any pages, so himem can also be used here
889 	 */
890 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
891 
892 	while (count) {
893 		int j, order;
894 
895 		order = iommu_order_array[order_idx];
896 
897 		/* Drop down when we get small */
898 		if (__fls(count) < order) {
899 			order_idx++;
900 			continue;
901 		}
902 
903 		if (order) {
904 			/* See if it's easy to allocate a high-order chunk */
905 			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
906 
907 			/* Go down a notch at first sign of pressure */
908 			if (!pages[i]) {
909 				order_idx++;
910 				continue;
911 			}
912 		} else {
913 			pages[i] = alloc_pages(gfp, 0);
914 			if (!pages[i])
915 				goto error;
916 		}
917 
918 		if (order) {
919 			split_page(pages[i], order);
920 			j = 1 << order;
921 			while (--j)
922 				pages[i + j] = pages[i] + j;
923 		}
924 
925 		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
926 		i += 1 << order;
927 		count -= 1 << order;
928 	}
929 
930 	return pages;
931 error:
932 	while (i--)
933 		if (pages[i])
934 			__free_pages(pages[i], 0);
935 	kvfree(pages);
936 	return NULL;
937 }
938 
939 static int __iommu_free_buffer(struct device *dev, struct page **pages,
940 			       size_t size, unsigned long attrs)
941 {
942 	int count = size >> PAGE_SHIFT;
943 	int i;
944 
945 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
946 		dma_release_from_contiguous(dev, pages[0], count);
947 	} else {
948 		for (i = 0; i < count; i++)
949 			if (pages[i])
950 				__free_pages(pages[i], 0);
951 	}
952 
953 	kvfree(pages);
954 	return 0;
955 }
956 
957 /*
958  * Create a mapping in device IO address space for the specified pages
959  */
960 static dma_addr_t
961 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
962 		       unsigned long attrs)
963 {
964 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
965 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
966 	dma_addr_t dma_addr, iova;
967 	int i;
968 
969 	dma_addr = __alloc_iova(mapping, size);
970 	if (dma_addr == DMA_MAPPING_ERROR)
971 		return dma_addr;
972 
973 	iova = dma_addr;
974 	for (i = 0; i < count; ) {
975 		int ret;
976 
977 		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
978 		phys_addr_t phys = page_to_phys(pages[i]);
979 		unsigned int len, j;
980 
981 		for (j = i + 1; j < count; j++, next_pfn++)
982 			if (page_to_pfn(pages[j]) != next_pfn)
983 				break;
984 
985 		len = (j - i) << PAGE_SHIFT;
986 		ret = iommu_map(mapping->domain, iova, phys, len,
987 				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
988 				GFP_KERNEL);
989 		if (ret < 0)
990 			goto fail;
991 		iova += len;
992 		i = j;
993 	}
994 	return dma_addr;
995 fail:
996 	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
997 	__free_iova(mapping, dma_addr, size);
998 	return DMA_MAPPING_ERROR;
999 }
1000 
1001 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1002 {
1003 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1004 
1005 	/*
1006 	 * add optional in-page offset from iova to size and align
1007 	 * result to page size
1008 	 */
1009 	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1010 	iova &= PAGE_MASK;
1011 
1012 	iommu_unmap(mapping->domain, iova, size);
1013 	__free_iova(mapping, iova, size);
1014 	return 0;
1015 }
1016 
1017 static struct page **__atomic_get_pages(void *addr)
1018 {
1019 	struct page *page;
1020 	phys_addr_t phys;
1021 
1022 	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
1023 	page = phys_to_page(phys);
1024 
1025 	return (struct page **)page;
1026 }
1027 
1028 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1029 {
1030 	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1031 		return __atomic_get_pages(cpu_addr);
1032 
1033 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1034 		return cpu_addr;
1035 
1036 	return dma_common_find_pages(cpu_addr);
1037 }
1038 
1039 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
1040 				  dma_addr_t *handle, int coherent_flag,
1041 				  unsigned long attrs)
1042 {
1043 	struct page *page;
1044 	void *addr;
1045 
1046 	if (coherent_flag == COHERENT)
1047 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
1048 	else
1049 		addr = __alloc_from_pool(size, &page);
1050 	if (!addr)
1051 		return NULL;
1052 
1053 	*handle = __iommu_create_mapping(dev, &page, size, attrs);
1054 	if (*handle == DMA_MAPPING_ERROR)
1055 		goto err_mapping;
1056 
1057 	return addr;
1058 
1059 err_mapping:
1060 	__free_from_pool(addr, size);
1061 	return NULL;
1062 }
1063 
1064 static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
1065 			dma_addr_t handle, size_t size, int coherent_flag)
1066 {
1067 	__iommu_remove_mapping(dev, handle, size);
1068 	if (coherent_flag == COHERENT)
1069 		__dma_free_buffer(virt_to_page(cpu_addr), size);
1070 	else
1071 		__free_from_pool(cpu_addr, size);
1072 }
1073 
1074 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1075 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1076 {
1077 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
1078 	struct page **pages;
1079 	void *addr = NULL;
1080 	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
1081 
1082 	*handle = DMA_MAPPING_ERROR;
1083 	size = PAGE_ALIGN(size);
1084 
1085 	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
1086 		return __iommu_alloc_simple(dev, size, gfp, handle,
1087 					    coherent_flag, attrs);
1088 
1089 	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
1090 	if (!pages)
1091 		return NULL;
1092 
1093 	*handle = __iommu_create_mapping(dev, pages, size, attrs);
1094 	if (*handle == DMA_MAPPING_ERROR)
1095 		goto err_buffer;
1096 
1097 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1098 		return pages;
1099 
1100 	addr = dma_common_pages_remap(pages, size, prot,
1101 				   __builtin_return_address(0));
1102 	if (!addr)
1103 		goto err_mapping;
1104 
1105 	return addr;
1106 
1107 err_mapping:
1108 	__iommu_remove_mapping(dev, *handle, size);
1109 err_buffer:
1110 	__iommu_free_buffer(dev, pages, size, attrs);
1111 	return NULL;
1112 }
1113 
1114 static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1115 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
1116 		    unsigned long attrs)
1117 {
1118 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1119 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1120 	int err;
1121 
1122 	if (!pages)
1123 		return -ENXIO;
1124 
1125 	if (vma->vm_pgoff >= nr_pages)
1126 		return -ENXIO;
1127 
1128 	if (!dev->dma_coherent)
1129 		vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1130 
1131 	err = vm_map_pages(vma, pages, nr_pages);
1132 	if (err)
1133 		pr_err("Remapping memory failed: %d\n", err);
1134 
1135 	return err;
1136 }
1137 
1138 /*
1139  * Free a buffer as defined by the above mapping.
1140  * Must not be called with IRQs disabled.
1141  */
1142 static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1143 	dma_addr_t handle, unsigned long attrs)
1144 {
1145 	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
1146 	struct page **pages;
1147 	size = PAGE_ALIGN(size);
1148 
1149 	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
1150 		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
1151 		return;
1152 	}
1153 
1154 	pages = __iommu_get_pages(cpu_addr, attrs);
1155 	if (!pages) {
1156 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1157 		return;
1158 	}
1159 
1160 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
1161 		dma_common_free_remap(cpu_addr, size);
1162 
1163 	__iommu_remove_mapping(dev, handle, size);
1164 	__iommu_free_buffer(dev, pages, size, attrs);
1165 }
1166 
1167 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1168 				 void *cpu_addr, dma_addr_t dma_addr,
1169 				 size_t size, unsigned long attrs)
1170 {
1171 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1172 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1173 
1174 	if (!pages)
1175 		return -ENXIO;
1176 
1177 	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1178 					 GFP_KERNEL);
1179 }
1180 
1181 /*
1182  * Map a part of the scatter-gather list into contiguous IO address space
1183  */
1184 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1185 			  size_t size, dma_addr_t *handle,
1186 			  enum dma_data_direction dir, unsigned long attrs)
1187 {
1188 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1189 	dma_addr_t iova, iova_base;
1190 	int ret = 0;
1191 	unsigned int count;
1192 	struct scatterlist *s;
1193 	int prot;
1194 
1195 	size = PAGE_ALIGN(size);
1196 	*handle = DMA_MAPPING_ERROR;
1197 
1198 	iova_base = iova = __alloc_iova(mapping, size);
1199 	if (iova == DMA_MAPPING_ERROR)
1200 		return -ENOMEM;
1201 
1202 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1203 		phys_addr_t phys = page_to_phys(sg_page(s));
1204 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
1205 
1206 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1207 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1208 
1209 		prot = __dma_info_to_prot(dir, attrs);
1210 
1211 		ret = iommu_map(mapping->domain, iova, phys, len, prot,
1212 				GFP_KERNEL);
1213 		if (ret < 0)
1214 			goto fail;
1215 		count += len >> PAGE_SHIFT;
1216 		iova += len;
1217 	}
1218 	*handle = iova_base;
1219 
1220 	return 0;
1221 fail:
1222 	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1223 	__free_iova(mapping, iova_base, size);
1224 	return ret;
1225 }
1226 
1227 /**
1228  * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1229  * @dev: valid struct device pointer
1230  * @sg: list of buffers
1231  * @nents: number of buffers to map
1232  * @dir: DMA transfer direction
1233  *
1234  * Map a set of buffers described by scatterlist in streaming mode for DMA.
1235  * The scatter-gather list elements are merged together (if possible) and
1236  * tagged with the appropriate DMA address and length, which are obtained
1237  * via sg_dma_{address,length}; see the sketch after this function.
1238  */
1239 static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1240 		int nents, enum dma_data_direction dir, unsigned long attrs)
1241 {
1242 	struct scatterlist *s = sg, *dma = sg, *start = sg;
1243 	int i, count = 0, ret;
1244 	unsigned int offset = s->offset;
1245 	unsigned int size = s->offset + s->length;
1246 	unsigned int max = dma_get_max_seg_size(dev);
1247 
1248 	for (i = 1; i < nents; i++) {
1249 		s = sg_next(s);
1250 
1251 		s->dma_length = 0;
1252 
1253 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1254 			ret = __map_sg_chunk(dev, start, size,
1255 					     &dma->dma_address, dir, attrs);
1256 			if (ret < 0)
1257 				goto bad_mapping;
1258 
1259 			dma->dma_address += offset;
1260 			dma->dma_length = size - offset;
1261 
1262 			size = offset = s->offset;
1263 			start = s;
1264 			dma = sg_next(dma);
1265 			count += 1;
1266 		}
1267 		size += s->length;
1268 	}
1269 	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
1270 	if (ret < 0)
1271 		goto bad_mapping;
1272 
1273 	dma->dma_address += offset;
1274 	dma->dma_length = size - offset;
1275 
1276 	return count + 1;
1277 
1278 bad_mapping:
1279 	for_each_sg(sg, s, count, i)
1280 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1281 	if (ret == -ENOMEM)
1282 		return ret;
1283 	return -EINVAL;
1284 }
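
/*
 * Editor's sketch (hypothetical driver code): consuming the merged
 * segments produced by arm_iommu_map_sg() above through the generic
 * DMA API; "dev", "sgl", "nents" and program_hw_segment() are
 * placeholders.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_segment(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */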
1285 
1286 /**
1287  * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1288  * @dev: valid struct device pointer
1289  * @sg: list of buffers
1290  * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1291  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1292  *
1293  * Unmap a set of streaming mode DMA translations.  Again, CPU access
1294  * rules concerning calls here are the same as for dma_unmap_single().
1295  */
1296 static void arm_iommu_unmap_sg(struct device *dev,
1297 			       struct scatterlist *sg, int nents,
1298 			       enum dma_data_direction dir,
1299 			       unsigned long attrs)
1300 {
1301 	struct scatterlist *s;
1302 	int i;
1303 
1304 	for_each_sg(sg, s, nents, i) {
1305 		if (sg_dma_len(s))
1306 			__iommu_remove_mapping(dev, sg_dma_address(s),
1307 					       sg_dma_len(s));
1308 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1309 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
1310 					      s->length, dir);
1311 	}
1312 }
1313 
1314 /**
1315  * arm_iommu_sync_sg_for_cpu
1316  * @dev: valid struct device pointer
1317  * @sg: list of buffers
1318  * @nents: number of buffers to map (returned from dma_map_sg)
1319  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1320  */
1321 static void arm_iommu_sync_sg_for_cpu(struct device *dev,
1322 			struct scatterlist *sg,
1323 			int nents, enum dma_data_direction dir)
1324 {
1325 	struct scatterlist *s;
1326 	int i;
1327 
1328 	if (dev->dma_coherent)
1329 		return;
1330 
1331 	for_each_sg(sg, s, nents, i)
1332 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
1333 
1334 }
1335 
1336 /**
1337  * arm_iommu_sync_sg_for_device
1338  * @dev: valid struct device pointer
1339  * @sg: list of buffers
1340  * @nents: number of buffers to map (returned from dma_map_sg)
1341  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1342  */
1343 static void arm_iommu_sync_sg_for_device(struct device *dev,
1344 			struct scatterlist *sg,
1345 			int nents, enum dma_data_direction dir)
1346 {
1347 	struct scatterlist *s;
1348 	int i;
1349 
1350 	if (dev->dma_coherent)
1351 		return;
1352 
1353 	for_each_sg(sg, s, nents, i)
1354 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1355 }
1356 
1357 /**
1358  * arm_iommu_map_page
1359  * @dev: valid struct device pointer
1360  * @page: page that buffer resides in
1361  * @offset: offset into page for start of buffer
1362  * @size: size of buffer to map
1363  * @dir: DMA transfer direction
1364  *
1365  * IOMMU aware version of arm_dma_map_page()
1366  */
1367 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1368 	     unsigned long offset, size_t size, enum dma_data_direction dir,
1369 	     unsigned long attrs)
1370 {
1371 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1372 	dma_addr_t dma_addr;
1373 	int ret, prot, len = PAGE_ALIGN(size + offset);
1374 
1375 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1376 		__dma_page_cpu_to_dev(page, offset, size, dir);
1377 
1378 	dma_addr = __alloc_iova(mapping, len);
1379 	if (dma_addr == DMA_MAPPING_ERROR)
1380 		return dma_addr;
1381 
1382 	prot = __dma_info_to_prot(dir, attrs);
1383 
1384 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
1385 			prot, GFP_KERNEL);
1386 	if (ret < 0)
1387 		goto fail;
1388 
1389 	return dma_addr + offset;
1390 fail:
1391 	__free_iova(mapping, dma_addr, len);
1392 	return DMA_MAPPING_ERROR;
1393 }
1394 
1395 /**
1396  * arm_iommu_unmap_page
1397  * @dev: valid struct device pointer
1398  * @handle: DMA address of buffer
1399  * @size: size of buffer (same as passed to dma_map_page)
1400  * @dir: DMA transfer direction (same as passed to dma_map_page)
1401  *
1402  * IOMMU aware version of arm_dma_unmap_page()
1403  */
1404 static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1405 		size_t size, enum dma_data_direction dir, unsigned long attrs)
1406 {
1407 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1408 	dma_addr_t iova = handle & PAGE_MASK;
1409 	struct page *page;
1410 	int offset = handle & ~PAGE_MASK;
1411 	int len = PAGE_ALIGN(size + offset);
1412 
1413 	if (!iova)
1414 		return;
1415 
1416 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
1417 		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1418 		__dma_page_dev_to_cpu(page, offset, size, dir);
1419 	}
1420 
1421 	iommu_unmap(mapping->domain, iova, len);
1422 	__free_iova(mapping, iova, len);
1423 }
1424 
1425 /**
1426  * arm_iommu_map_resource - map a device resource for DMA
1427  * @dev: valid struct device pointer
1428  * @phys_addr: physical address of resource
1429  * @size: size of resource to map
1430  * @dir: DMA transfer direction
1431  */
1432 static dma_addr_t arm_iommu_map_resource(struct device *dev,
1433 		phys_addr_t phys_addr, size_t size,
1434 		enum dma_data_direction dir, unsigned long attrs)
1435 {
1436 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1437 	dma_addr_t dma_addr;
1438 	int ret, prot;
1439 	phys_addr_t addr = phys_addr & PAGE_MASK;
1440 	unsigned int offset = phys_addr & ~PAGE_MASK;
1441 	size_t len = PAGE_ALIGN(size + offset);
1442 
1443 	dma_addr = __alloc_iova(mapping, len);
1444 	if (dma_addr == DMA_MAPPING_ERROR)
1445 		return dma_addr;
1446 
1447 	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
1448 
1449 	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
1450 	if (ret < 0)
1451 		goto fail;
1452 
1453 	return dma_addr + offset;
1454 fail:
1455 	__free_iova(mapping, dma_addr, len);
1456 	return DMA_MAPPING_ERROR;
1457 }
1458 
1459 /**
1460  * arm_iommu_unmap_resource - unmap a device DMA resource
1461  * @dev: valid struct device pointer
1462  * @dma_handle: DMA address to resource
1463  * @size: size of resource to map
1464  * @dir: DMA transfer direction
1465  */
1466 static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
1467 		size_t size, enum dma_data_direction dir,
1468 		unsigned long attrs)
1469 {
1470 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1471 	dma_addr_t iova = dma_handle & PAGE_MASK;
1472 	unsigned int offset = dma_handle & ~PAGE_MASK;
1473 	size_t len = PAGE_ALIGN(size + offset);
1474 
1475 	if (!iova)
1476 		return;
1477 
1478 	iommu_unmap(mapping->domain, iova, len);
1479 	__free_iova(mapping, iova, len);
1480 }
1481 
1482 static void arm_iommu_sync_single_for_cpu(struct device *dev,
1483 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
1484 {
1485 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1486 	dma_addr_t iova = handle & PAGE_MASK;
1487 	struct page *page;
1488 	unsigned int offset = handle & ~PAGE_MASK;
1489 
1490 	if (dev->dma_coherent || !iova)
1491 		return;
1492 
1493 	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1494 	__dma_page_dev_to_cpu(page, offset, size, dir);
1495 }
1496 
1497 static void arm_iommu_sync_single_for_device(struct device *dev,
1498 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
1499 {
1500 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1501 	dma_addr_t iova = handle & PAGE_MASK;
1502 	struct page *page;
1503 	unsigned int offset = handle & ~PAGE_MASK;
1504 
1505 	if (dev->dma_coherent || !iova)
1506 		return;
1507 
1508 	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1509 	__dma_page_cpu_to_dev(page, offset, size, dir);
1510 }
1511 
1512 static const struct dma_map_ops iommu_ops = {
1513 	.alloc		= arm_iommu_alloc_attrs,
1514 	.free		= arm_iommu_free_attrs,
1515 	.mmap		= arm_iommu_mmap_attrs,
1516 	.get_sgtable	= arm_iommu_get_sgtable,
1517 
1518 	.map_page		= arm_iommu_map_page,
1519 	.unmap_page		= arm_iommu_unmap_page,
1520 	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
1521 	.sync_single_for_device	= arm_iommu_sync_single_for_device,
1522 
1523 	.map_sg			= arm_iommu_map_sg,
1524 	.unmap_sg		= arm_iommu_unmap_sg,
1525 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
1526 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
1527 
1528 	.map_resource		= arm_iommu_map_resource,
1529 	.unmap_resource		= arm_iommu_unmap_resource,
1530 };
1531 
1532 /**
1533  * arm_iommu_create_mapping
1534  * @bus: pointer to the bus holding the client device (for IOMMU calls)
1535  * @base: start address of the valid IO address space
1536  * @size: maximum size of the valid IO address space
1537  *
1538  * Creates a mapping structure which holds information about used/unused
1539  * IO address ranges; this is required to perform memory allocation and
1540  * mapping with IOMMU-aware functions.
1541  *
1542  * The client device needs to be attached to the mapping with the
1543  * arm_iommu_attach_device() function; see the sketch below.
1544  */
1545 struct dma_iommu_mapping *
1546 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
1547 {
1548 	unsigned int bits = size >> PAGE_SHIFT;
1549 	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
1550 	struct dma_iommu_mapping *mapping;
1551 	int extensions = 1;
1552 	int err = -ENOMEM;
1553 
1554 	/* currently only 32-bit DMA address space is supported */
1555 	if (size > DMA_BIT_MASK(32) + 1)
1556 		return ERR_PTR(-ERANGE);
1557 
1558 	if (!bitmap_size)
1559 		return ERR_PTR(-EINVAL);
1560 
1561 	if (bitmap_size > PAGE_SIZE) {
1562 		extensions = bitmap_size / PAGE_SIZE;
1563 		bitmap_size = PAGE_SIZE;
1564 	}
1565 
1566 	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
1567 	if (!mapping)
1568 		goto err;
1569 
1570 	mapping->bitmap_size = bitmap_size;
1571 	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
1572 				   GFP_KERNEL);
1573 	if (!mapping->bitmaps)
1574 		goto err2;
1575 
1576 	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
1577 	if (!mapping->bitmaps[0])
1578 		goto err3;
1579 
1580 	mapping->nr_bitmaps = 1;
1581 	mapping->extensions = extensions;
1582 	mapping->base = base;
1583 	mapping->bits = BITS_PER_BYTE * bitmap_size;
1584 
1585 	spin_lock_init(&mapping->lock);
1586 
1587 	mapping->domain = iommu_domain_alloc(bus);
1588 	if (!mapping->domain)
1589 		goto err4;
1590 
1591 	kref_init(&mapping->kref);
1592 	return mapping;
1593 err4:
1594 	kfree(mapping->bitmaps[0]);
1595 err3:
1596 	kfree(mapping->bitmaps);
1597 err2:
1598 	kfree(mapping);
1599 err:
1600 	return ERR_PTR(err);
1601 }
1602 EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
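
/*
 * Editor's sketch (hypothetical caller, not from this file): a bus
 * master driver could carve out a 128 MiB IO address space starting
 * at 0x10000000 and attach its device to it; "dev" is a placeholder.
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x10000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 *
 * arm_iommu_attach_device() and arm_iommu_release_mapping() are
 * defined further down in this file.
 */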
1603 
1604 static void release_iommu_mapping(struct kref *kref)
1605 {
1606 	int i;
1607 	struct dma_iommu_mapping *mapping =
1608 		container_of(kref, struct dma_iommu_mapping, kref);
1609 
1610 	iommu_domain_free(mapping->domain);
1611 	for (i = 0; i < mapping->nr_bitmaps; i++)
1612 		kfree(mapping->bitmaps[i]);
1613 	kfree(mapping->bitmaps);
1614 	kfree(mapping);
1615 }
1616 
1617 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
1618 {
1619 	int next_bitmap;
1620 
1621 	if (mapping->nr_bitmaps >= mapping->extensions)
1622 		return -EINVAL;
1623 
1624 	next_bitmap = mapping->nr_bitmaps;
1625 	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
1626 						GFP_ATOMIC);
1627 	if (!mapping->bitmaps[next_bitmap])
1628 		return -ENOMEM;
1629 
1630 	mapping->nr_bitmaps++;
1631 
1632 	return 0;
1633 }
1634 
1635 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
1636 {
1637 	if (mapping)
1638 		kref_put(&mapping->kref, release_iommu_mapping);
1639 }
1640 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
1641 
1642 static int __arm_iommu_attach_device(struct device *dev,
1643 				     struct dma_iommu_mapping *mapping)
1644 {
1645 	int err;
1646 
1647 	err = iommu_attach_device(mapping->domain, dev);
1648 	if (err)
1649 		return err;
1650 
1651 	kref_get(&mapping->kref);
1652 	to_dma_iommu_mapping(dev) = mapping;
1653 
1654 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
1655 	return 0;
1656 }
1657 
1658 /**
1659  * arm_iommu_attach_device
1660  * @dev: valid struct device pointer
1661  * @mapping: IO address space mapping structure (returned from
1662  *	arm_iommu_create_mapping)
1663  *
1664  * Attaches the specified IO address space mapping to the provided device.
1665  * This replaces the dma operations (dma_map_ops pointer) with the
1666  * IOMMU aware version.
1667  *
1668  * More than one client might be attached to the same io address space
1669  * mapping.
1670  */
1671 int arm_iommu_attach_device(struct device *dev,
1672 			    struct dma_iommu_mapping *mapping)
1673 {
1674 	int err;
1675 
1676 	err = __arm_iommu_attach_device(dev, mapping);
1677 	if (err)
1678 		return err;
1679 
1680 	set_dma_ops(dev, &iommu_ops);
1681 	return 0;
1682 }
1683 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
1684 
1685 /**
1686  * arm_iommu_detach_device
1687  * @dev: valid struct device pointer
1688  *
1689  * Detaches the provided device from a previously attached mapping.
1690  * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
1691  */
1692 void arm_iommu_detach_device(struct device *dev)
1693 {
1694 	struct dma_iommu_mapping *mapping;
1695 
1696 	mapping = to_dma_iommu_mapping(dev);
1697 	if (!mapping) {
1698 		dev_warn(dev, "Not attached\n");
1699 		return;
1700 	}
1701 
1702 	iommu_detach_device(mapping->domain, dev);
1703 	kref_put(&mapping->kref, release_iommu_mapping);
1704 	to_dma_iommu_mapping(dev) = NULL;
1705 	set_dma_ops(dev, NULL);
1706 
1707 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
1708 }
1709 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
1710 
1711 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
1712 				    const struct iommu_ops *iommu, bool coherent)
1713 {
1714 	struct dma_iommu_mapping *mapping;
1715 
1716 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
1717 	if (IS_ERR(mapping)) {
1718 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
1719 				size, dev_name(dev));
1720 		return;
1721 	}
1722 
1723 	if (__arm_iommu_attach_device(dev, mapping)) {
1724 		pr_warn("Failed to attach device %s to IOMMU mapping\n",
1725 				dev_name(dev));
1726 		arm_iommu_release_mapping(mapping);
1727 		return;
1728 	}
1729 
1730 	set_dma_ops(dev, &iommu_ops);
1731 }
1732 
1733 static void arm_teardown_iommu_dma_ops(struct device *dev)
1734 {
1735 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1736 
1737 	if (!mapping)
1738 		return;
1739 
1740 	arm_iommu_detach_device(dev);
1741 	arm_iommu_release_mapping(mapping);
1742 }
1743 
1744 #else
1745 
1746 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
1747 				    const struct iommu_ops *iommu, bool coherent)
1748 {
1749 }
1750 
1751 static void arm_teardown_iommu_dma_ops(struct device *dev) { }
1752 
1753 #endif	/* CONFIG_ARM_DMA_USE_IOMMU */
1754 
1755 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
1756 			const struct iommu_ops *iommu, bool coherent)
1757 {
1758 	/*
1759 	 * Due to legacy code that sets the ->dma_coherent flag from a bus
1760 	 * notifier we can't just assign coherent to the ->dma_coherent flag
1761 	 * here, but instead have to make sure we only set but never clear it
1762 	 * for now.
1763 	 */
1764 	if (coherent)
1765 		dev->dma_coherent = true;
1766 
1767 	/*
1768 	 * Don't override the dma_ops if they have already been set. Ideally
1769 	 * this should be the only location where dma_ops are set; remove this
1770 	 * check once all other callers of set_dma_ops() have disappeared.
1771 	 */
1772 	if (dev->dma_ops)
1773 		return;
1774 
1775 	if (iommu)
1776 		arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
1777 
1778 	xen_setup_dma_ops(dev);
1779 	dev->archdata.dma_ops_setup = true;
1780 }
1781 
1782 void arch_teardown_dma_ops(struct device *dev)
1783 {
1784 	if (!dev->archdata.dma_ops_setup)
1785 		return;
1786 
1787 	arm_teardown_iommu_dma_ops(dev);
1788 	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
1789 	set_dma_ops(dev, NULL);
1790 }
1791 
1792 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
1793 		enum dma_data_direction dir)
1794 {
1795 	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
1796 			      size, dir);
1797 }
1798 
1799 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
1800 		enum dma_data_direction dir)
1801 {
1802 	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
1803 			      size, dir);
1804 }
1805 
1806 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
1807 		gfp_t gfp, unsigned long attrs)
1808 {
1809 	return __dma_alloc(dev, size, dma_handle, gfp,
1810 			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
1811 			   attrs, __builtin_return_address(0));
1812 }
1813 
1814 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
1815 		dma_addr_t dma_handle, unsigned long attrs)
1816 {
1817 	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
1818 }
1819