xref: /openbmc/linux/arch/arm/mm/dma-mapping.c (revision a9c9147e)
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

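/*
 * Worked example of the macros above (illustrative only; assumes the
 * default CONSISTENT_DMA_SIZE of SZ_2M, classic ARM 2-level page tables
 * with PGDIR_SHIFT = 21, and 4KiB pages):
 *
 *	CONSISTENT_BASE        = 0xffe00000 - 0x00200000 = 0xffc00000
 *	CONSISTENT_OFFSET(x)   = page index of 'x' within the region
 *	NUM_CONSISTENT_PTES    = 0x00200000 >> 21 = 1 pte table,
 *	                         covering 512 * 4KiB = 2MiB of mappings
 */
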
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}

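/*
 * Example of the rounding behaviour above (illustrative only): a 20KiB
 * request has get_order() == 3, so alloc_pages() returns a 32KiB
 * allocation; split_page() turns it into eight independent 4KiB pages,
 * and the loop frees the trailing three pages (12KiB), leaving exactly
 * the five pages that were asked for.
 */
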
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);

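/*
 * Typical driver usage of the coherent API (illustrative sketch only, not
 * part of this file; 'mydev' and the buffer size are made-up placeholders):
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(&mydev->dev, SZ_4K, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... hand 'bus' to the device, access the buffer through 'cpu' ...
 *	dma_free_coherent(&mydev->dev, SZ_4K, cpu, bus);
 */
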
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);

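/*
 * Illustrative sketch of how a driver might wire this into its mmap file
 * operation (not from this file; the 'mydrv' names are placeholders):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return dma_mmap_writecombine(drv->dev, vma, drv->cpu_addr,
 *					     drv->dma_handle, drv->buf_size);
 *	}
 */
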
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void dma_cache_maint(const void *start, size_t size, int direction)
{
	void (*outer_op)(unsigned long, unsigned long);

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	outer_op(__pa(start), __pa(start) + size);
}

void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);
	dma_cache_maint(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;
	void (*outer_op)(unsigned long, unsigned long);

	switch (dir) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	outer_op(paddr, paddr + size);
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);

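/*
 * Sketch of the expected calling pattern (illustrative only; the table
 * setup, 'page0'/'page1' and the entry count are made up for the example):
 *
 *	struct scatterlist sgl[2];
 *	int mapped;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_page(&sgl[0], page0, PAGE_SIZE, 0);
 *	sg_set_page(&sgl[1], page1, PAGE_SIZE, 0);
 *
 *	mapped = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	... program the device using sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */
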
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
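
/*
 * Illustrative buffer-reuse cycle with the sync API (sketch only; 'dev',
 * 'sgl' and 'nents' are placeholders): after a device-to-memory transfer
 * completes, hand the buffer back to the CPU before reading it, and back
 * to the device before the next transfer:
 *
 *	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
 *	... CPU inspects the received data ...
 *	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */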