/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>

#include "mm.h"

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
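	/*
	 * For the usual mask of the form (1 << n) - 1, the computation
	 * below yields limit = 1 << n, the first byte beyond the
	 * addressable range; e.g. mask 0x00ffffff gives a 16MB limit.
	 * An all-ones 64-bit mask wraps to 0 and disables the check.
	 */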
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#zx mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);
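	/*
	 * e.g. a 12K request is rounded up to an order-2 (16K) block;
	 * the loop above hands the trailing fourth page back to the
	 * page allocator so only the 3 pages actually needed are kept.
	 */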

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
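
/*
 * Each consistent_pte[] slot covers one PMD's worth (2MB) of the region:
 * CONSISTENT_PTE_INDEX() selects the slot and CONSISTENT_OFFSET(), masked
 * with (PTRS_PER_PTE - 1), the pte within it.  With 4K pages, an address
 * 3MB into the region is slot 1, pte 256.
 */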

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t **consistent_pte;

#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M

unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;

void __init init_consistent_dma_size(unsigned long size)
{
	unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);

	BUG_ON(consistent_pte); /* Check we're called before DMA region init */
	BUG_ON(base < VMALLOC_END);

	/* Grow region to accommodate specified size */
	if (base < consistent_base)
		consistent_base = base;
}
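
/*
 * A platform wanting a larger region would typically call this from its
 * early machine setup, before consistent_init() below runs - e.g. from
 * a ->map_io hook (illustrative sketch, hypothetical machine):
 *
 *	static void __init foo_map_io(void)
 *	{
 *		...
 *		init_consistent_dma_size(SZ_8M);
 *	}
 */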

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	unsigned long base = consistent_base;
	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;

	consistent_pte = kmalloc(num_ptes * sizeof(pte_t *), GFP_KERNEL);
	if (!consistent_pte) {
		pr_err("%s: no memory\n", __func__);
		return -ENOMEM;
	}

	pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
	consistent_head.vm_start = base;

	do {
		pgd = pgd_offset(&init_mm, base);

		pud = pud_alloc(&init_mm, pgd, base);
		if (!pud) {
			printk(KERN_ERR "%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pmd = pmd_alloc(&init_mm, pud, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += PMD_SIZE;
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;
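	/*
	 * e.g. a 48K request gives fls(0xbfff) = 16, i.e. 64K alignment;
	 * requests of a section (1MB) or more are capped at 1MB alignment.
	 */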

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		dsb();

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
	else
		__dma_free_buffer(page, size);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
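
/*
 * Illustrative driver-side usage (a sketch, not part of this file; the
 * device and size are hypothetical):
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, SZ_4K, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... program the device with 'bus', access the buffer via 'cpu' ...
 *	dma_free_coherent(&pdev->dev, SZ_4K, cpu, bus);
 */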

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
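
/*
 * A driver would typically wire one of these into its ->mmap file
 * operation (illustrative sketch; 'foo_priv' and its fields are
 * hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_addr, priv->size);
 *	}
 */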

/*
 * Free a buffer as allocated by dma_alloc_coherent() or
 * dma_alloc_writecombine() above.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);
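
/*
 * The pair above implements the streaming-DMA ownership handoff behind
 * dma_sync_single_for_device()/dma_sync_single_for_cpu() (see
 * dma-mapping.h).  Typical driver sequence (sketch):
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *	... CPU may now read the freshly DMA'd data ...
 *	dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */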

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sg, s, nents, i) {
		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	debug_dma_map_sg(dev, sg, nents, nents, dir);
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	debug_dma_unmap_sg(dev, sg, nents, dir);

	for_each_sg(sg, s, nents, i)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);
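
/*
 * Typical driver usage (sketch; 'nents' is the count of entries the
 * driver filled in before mapping):
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -ENOMEM;
 *	... feed sg_dma_address()/sg_dma_len() of each entry to the device ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original 'nents', not the value
 * returned by dma_map_sg().
 */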

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}

	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}

	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

#ifndef CONFIG_DMABOUNCE
	*dev->dma_mask = dma_mask;
#endif

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
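
/*
 * e.g. a driver for a device that can only drive 24 address bits would
 * do (sketch):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 */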

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);