/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>

#include "mm.h"

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return __dma_map_page(dev, page, offset, size, dir);
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	__dma_unmap_page(dev, handle, size, dir);
}

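/*
 * Illustrative sketch (not part of this file's build): a driver using the
 * generic dma_map_page()/dma_unmap_page() wrappers, which dispatch to the
 * arm_dma_map_page()/arm_dma_unmap_page() implementations above.  The
 * device, page and direction below are hypothetical.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... device now owns the buffer; start the transfer ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */
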
static inline void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static inline void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
	if (!dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

static int arm_dma_set_mask(struct device *dev, u64 dma_mask);

struct dma_map_ops arm_dma_ops = {
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

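/*
 * Sketch of how the generic DMA API reaches this ops table (simplified;
 * the real inline wrappers live in the dma-mapping headers):
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *
 *	dma = ops->map_page(dev, page, offset, size, dir, attrs);
 */
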
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}

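/*
 * A driver normally satisfies the check above by declaring its reach in
 * probe().  Minimal sketch (hypothetical platform device):
 *
 *	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */
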
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}

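/*
 * Worked example of the trimming above (illustrative numbers): a 20KB
 * request gives order = get_order(20KB) = 3, i.e. an 8-page (32KB)
 * allocation.  split_page() turns it into eight order-0 pages, and the
 * loop frees pages 5..7, leaving only the 5 pages that actually cover
 * 'size' allocated.
 */
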
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t **consistent_pte;

#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M

unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;

void __init init_consistent_dma_size(unsigned long size)
{
	unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);

	BUG_ON(consistent_pte);	/* Check we're called before DMA region init */
	BUG_ON(base < VMALLOC_END);

	/* Grow region to accommodate specified size */
	if (base < consistent_base)
		consistent_base = base;
}

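/*
 * Platforms needing more than DEFAULT_CONSISTENT_DMA_SIZE call this early,
 * before consistent_init() runs as a core_initcall.  A minimal sketch
 * (hypothetical board code; the exact machine_desc callback used varies
 * by platform):
 *
 *	static void __init myboard_map_io(void)
 *	{
 *		init_consistent_dma_size(SZ_8M);
 *		...
 *	}
 */
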
#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	unsigned long base = consistent_base;
	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;

	consistent_pte = kmalloc(num_ptes * sizeof(pte_t *), GFP_KERNEL);
	if (!consistent_pte) {
		pr_err("%s: no memory\n", __func__);
		return -ENOMEM;
	}

	pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
	consistent_head.vm_start = base;

	do {
		pgd = pgd_offset(&init_mm, base);

		pud = pud_alloc(&init_mm, pgd, base);
		if (!pud) {
			pr_err("%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pmd = pmd_alloc(&init_mm, pud, base);
		if (!pmd) {
			pr_err("%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			pr_err("%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += PMD_SIZE;
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte) {
		pr_err("%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;
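	/*
	 * For example (illustrative): a 12KB request has fls(12K - 1) = 14,
	 * so it is aligned to 16KB; anything of a section (1MB with
	 * SECTION_SHIFT == 20) or larger is capped at section alignment.
	 */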

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		dsb();

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		pr_err("%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		pr_err("%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			pr_crit("%s: bad page in kernel page table\n",
				__func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot, c)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot, const void *caller)
{
	struct page *page;
	void *addr;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot, caller);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
	else
		__dma_free_buffer(page, size);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel),
			   __builtin_return_address(0));
}
EXPORT_SYMBOL(dma_alloc_coherent);

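/*
 * Typical use (illustrative sketch; the device and buffer below are
 * hypothetical):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access 'ring' from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */
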
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel),
			   __builtin_return_address(0));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);

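/*
 * Illustrative sketch of a driver mmap() hook exporting a writecombined
 * DMA buffer to userspace (mydev, buf, buf_dma and buf_size are
 * hypothetical driver state):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_writecombine(mydev, vma, buf, buf_dma,
 *					     buf_size);
 *	}
 */
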
/*
 * Free a buffer as allocated by the above mapping functions.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

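/*
 * Typical use through the generic API (illustrative sketch; 'sgt' is a
 * hypothetical, already-populated sg_table):
 *
 *	int nents;
 *
 *	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -ENOMEM;
 *	... hand sg_dma_address()/sg_dma_len() of each entry to the device ...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */
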
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);

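/*
 * Worked example (illustrative): with arm_dma_limit == 0xffffffff, a
 * 24-bit device passing DMA_BIT_MASK(24) == 0x00ffffff fails the check
 * above, because such a mask cannot reach all DMA-able memory; that
 * device would need dmabounce or a suitably small ZONE_DMA instead.
 */
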
static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

#ifndef CONFIG_DMABOUNCE
	*dev->dma_mask = dma_mask;
#endif

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
#ifdef CONFIG_MMU
	arm_vmregion_create_proc("dma-mappings", &consistent_head);
#endif
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);