xref: /openbmc/linux/arch/arm/mm/dma-mapping.c (revision 13ccf3ad)
10ddbccd1SRussell King /*
20ddbccd1SRussell King  *  linux/arch/arm/mm/dma-mapping.c
30ddbccd1SRussell King  *
40ddbccd1SRussell King  *  Copyright (C) 2000-2004 Russell King
50ddbccd1SRussell King  *
60ddbccd1SRussell King  * This program is free software; you can redistribute it and/or modify
70ddbccd1SRussell King  * it under the terms of the GNU General Public License version 2 as
80ddbccd1SRussell King  * published by the Free Software Foundation.
90ddbccd1SRussell King  *
100ddbccd1SRussell King  *  DMA uncached mapping support.
110ddbccd1SRussell King  */
120ddbccd1SRussell King #include <linux/module.h>
130ddbccd1SRussell King #include <linux/mm.h>
140ddbccd1SRussell King #include <linux/slab.h>
150ddbccd1SRussell King #include <linux/errno.h>
160ddbccd1SRussell King #include <linux/list.h>
170ddbccd1SRussell King #include <linux/init.h>
180ddbccd1SRussell King #include <linux/device.h>
190ddbccd1SRussell King #include <linux/dma-mapping.h>
200ddbccd1SRussell King 
210ddbccd1SRussell King #include <asm/memory.h>
2243377453SNicolas Pitre #include <asm/highmem.h>
230ddbccd1SRussell King #include <asm/cacheflush.h>
240ddbccd1SRussell King #include <asm/tlbflush.h>
250ddbccd1SRussell King #include <asm/sizes.h>
260ddbccd1SRussell King 
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

/*
 * The DMA consistent region is a CONSISTENT_DMA_SIZE window of kernel
 * virtual space ending at CONSISTENT_END.
 */
#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)

/* Page index of address x relative to the start of the consistent region */
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
/* Index into consistent_pte[] (one PTE table per PGDIR-sized section) */
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
/* Number of PTE tables needed to cover the whole consistent region */
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
380ddbccd1SRussell King 
39ab6494f0SCatalin Marinas static u64 get_coherent_dma_mask(struct device *dev)
40ab6494f0SCatalin Marinas {
41ab6494f0SCatalin Marinas 	u64 mask = ISA_DMA_THRESHOLD;
420ddbccd1SRussell King 
43ab6494f0SCatalin Marinas 	if (dev) {
44ab6494f0SCatalin Marinas 		mask = dev->coherent_dma_mask;
45ab6494f0SCatalin Marinas 
46ab6494f0SCatalin Marinas 		/*
47ab6494f0SCatalin Marinas 		 * Sanity check the DMA mask - it must be non-zero, and
48ab6494f0SCatalin Marinas 		 * must be able to be satisfied by a DMA allocation.
49ab6494f0SCatalin Marinas 		 */
50ab6494f0SCatalin Marinas 		if (mask == 0) {
51ab6494f0SCatalin Marinas 			dev_warn(dev, "coherent DMA mask is unset\n");
52ab6494f0SCatalin Marinas 			return 0;
53ab6494f0SCatalin Marinas 		}
54ab6494f0SCatalin Marinas 
55ab6494f0SCatalin Marinas 		if ((~mask) & ISA_DMA_THRESHOLD) {
56ab6494f0SCatalin Marinas 			dev_warn(dev, "coherent DMA mask %#llx is smaller "
57ab6494f0SCatalin Marinas 				 "than system GFP_DMA mask %#llx\n",
58ab6494f0SCatalin Marinas 				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
59ab6494f0SCatalin Marinas 			return 0;
60ab6494f0SCatalin Marinas 		}
61ab6494f0SCatalin Marinas 	}
62ab6494f0SCatalin Marinas 
63ab6494f0SCatalin Marinas 	return mask;
64ab6494f0SCatalin Marinas }
65ab6494f0SCatalin Marinas 
66ab6494f0SCatalin Marinas #ifdef CONFIG_MMU
670ddbccd1SRussell King /*
680ddbccd1SRussell King  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
690ddbccd1SRussell King  */
700ddbccd1SRussell King static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
710ddbccd1SRussell King 
7213ccf3adSRussell King #include "vmregion.h"
730ddbccd1SRussell King 
/*
 * Tracks live allocations within the consistent region; vm_lock
 * serialises access to vm_list.
 */
static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
800ddbccd1SRussell King 
810ddbccd1SRussell King #ifdef CONFIG_HUGETLB_PAGE
820ddbccd1SRussell King #error ARM Coherent DMA allocator does not (yet) support huge TLB
830ddbccd1SRussell King #endif
840ddbccd1SRussell King 
/*
 * Allocate "size" bytes for DMA: grab pages from the page allocator,
 * flush them out of the CPU and outer caches, then remap them into
 * the consistent region with the given pgprot.  On success the bus
 * address is stored in *handle; on failure *handle is set to ~0 and
 * NULL is returned.
 */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct arm_vmregion *c;
	unsigned long order;
	u64 mask = get_coherent_dma_mask(dev);
	u64 limit;

	/* consistent_init() must have populated the PTE tables first */
	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (!mask)
		goto no_page;

	size = PAGE_ALIGN(size);
	/*
	 * limit is the smallest power-of-two boundary the mask cannot
	 * span (0 when the mask covers the whole address space); an
	 * allocation reaching it could not be addressed by the device.
	 */
	limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#x mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		struct page *end = page + (1 << order);
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		/* split the high-order block so excess pages can be freed */
		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			/* step into the next PTE table at a section boundary */
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}
187ab6494f0SCatalin Marinas #else	/* !CONFIG_MMU */
188ab6494f0SCatalin Marinas static void *
189ab6494f0SCatalin Marinas __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
190ab6494f0SCatalin Marinas 	    pgprot_t prot)
191ab6494f0SCatalin Marinas {
192ab6494f0SCatalin Marinas 	void *virt;
193ab6494f0SCatalin Marinas 	u64 mask = get_coherent_dma_mask(dev);
194ab6494f0SCatalin Marinas 
195ab6494f0SCatalin Marinas 	if (!mask)
196ab6494f0SCatalin Marinas 		goto error;
197ab6494f0SCatalin Marinas 
198c06e004cSRussell King 	if (mask < 0xffffffffULL)
199ab6494f0SCatalin Marinas 		gfp |= GFP_DMA;
200ab6494f0SCatalin Marinas 	virt = kmalloc(size, gfp);
201ab6494f0SCatalin Marinas 	if (!virt)
202ab6494f0SCatalin Marinas 		goto error;
203ab6494f0SCatalin Marinas 
204ab6494f0SCatalin Marinas 	*handle =  virt_to_dma(dev, virt);
205ab6494f0SCatalin Marinas 	return virt;
206ab6494f0SCatalin Marinas 
207ab6494f0SCatalin Marinas error:
208ab6494f0SCatalin Marinas 	*handle = ~0;
209ab6494f0SCatalin Marinas 	return NULL;
210ab6494f0SCatalin Marinas }
211ab6494f0SCatalin Marinas #endif	/* CONFIG_MMU */
2120ddbccd1SRussell King 
2130ddbccd1SRussell King /*
2140ddbccd1SRussell King  * Allocate DMA-coherent memory space and return both the kernel remapped
2150ddbccd1SRussell King  * virtual and bus address for that space.
2160ddbccd1SRussell King  */
2170ddbccd1SRussell King void *
2180ddbccd1SRussell King dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
2190ddbccd1SRussell King {
2200ddbccd1SRussell King 	void *memory;
2210ddbccd1SRussell King 
2220ddbccd1SRussell King 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
2230ddbccd1SRussell King 		return memory;
2240ddbccd1SRussell King 
2250ddbccd1SRussell King 	if (arch_is_coherent()) {
2260ddbccd1SRussell King 		void *virt;
2270ddbccd1SRussell King 
2280ddbccd1SRussell King 		virt = kmalloc(size, gfp);
2290ddbccd1SRussell King 		if (!virt)
2300ddbccd1SRussell King 			return NULL;
2310ddbccd1SRussell King 		*handle =  virt_to_dma(dev, virt);
2320ddbccd1SRussell King 
2330ddbccd1SRussell King 		return virt;
2340ddbccd1SRussell King 	}
2350ddbccd1SRussell King 
2360ddbccd1SRussell King 	return __dma_alloc(dev, size, handle, gfp,
2370ddbccd1SRussell King 			   pgprot_noncached(pgprot_kernel));
2380ddbccd1SRussell King }
2390ddbccd1SRussell King EXPORT_SYMBOL(dma_alloc_coherent);
2400ddbccd1SRussell King 
2410ddbccd1SRussell King /*
2420ddbccd1SRussell King  * Allocate a writecombining region, in much the same way as
2430ddbccd1SRussell King  * dma_alloc_coherent above.
2440ddbccd1SRussell King  */
2450ddbccd1SRussell King void *
2460ddbccd1SRussell King dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
2470ddbccd1SRussell King {
2480ddbccd1SRussell King 	return __dma_alloc(dev, size, handle, gfp,
2490ddbccd1SRussell King 			   pgprot_writecombine(pgprot_kernel));
2500ddbccd1SRussell King }
2510ddbccd1SRussell King EXPORT_SYMBOL(dma_alloc_writecombine);
2520ddbccd1SRussell King 
/*
 * Common helper for dma_mmap_coherent/dma_mmap_writecombine: map a
 * buffer previously returned by __dma_alloc() into a userspace vma.
 * Returns -ENXIO when cpu_addr is not a consistent-region allocation
 * or the requested window (vm_pgoff + vma size) exceeds it.
 */
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	/* find the allocation record covering cpu_addr */
	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		/* reject mappings extending past the allocation */
		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}
2810ddbccd1SRussell King 
2820ddbccd1SRussell King int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
2830ddbccd1SRussell King 		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
2840ddbccd1SRussell King {
2850ddbccd1SRussell King 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2860ddbccd1SRussell King 	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
2870ddbccd1SRussell King }
2880ddbccd1SRussell King EXPORT_SYMBOL(dma_mmap_coherent);
2890ddbccd1SRussell King 
2900ddbccd1SRussell King int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
2910ddbccd1SRussell King 			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
2920ddbccd1SRussell King {
2930ddbccd1SRussell King 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2940ddbccd1SRussell King 	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
2950ddbccd1SRussell King }
2960ddbccd1SRussell King EXPORT_SYMBOL(dma_mmap_writecombine);
2970ddbccd1SRussell King 
2980ddbccd1SRussell King /*
2990ddbccd1SRussell King  * free a page as defined by the above mapping.
3000ddbccd1SRussell King  * Must not be called with IRQs disabled.
3010ddbccd1SRussell King  */
302ab6494f0SCatalin Marinas #ifdef CONFIG_MMU
/*
 * Free a buffer allocated by __dma_alloc(): remove its vmregion,
 * clear the consistent-region PTEs page by page, return the pages to
 * the allocator and flush the now-stale kernel TLB entries.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	WARN_ON(irqs_disabled());

	/* allocations from a per-device coherent pool are released here */
	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	/* coherent architectures allocate with plain kmalloc() */
	if (arch_is_coherent()) {
		kfree(cpu_addr);
		return;
	}

	size = PAGE_ALIGN(size);

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	/* complain about a mismatched size, but free the real extent */
	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		/* advance into the next PTE table at a section boundary */
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		/* the mapping should only ever contain valid RAM pages */
		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
	return;

 no_area:
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
380ab6494f0SCatalin Marinas #else	/* !CONFIG_MMU */
381ab6494f0SCatalin Marinas void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
382ab6494f0SCatalin Marinas {
383ab6494f0SCatalin Marinas 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
384ab6494f0SCatalin Marinas 		return;
385ab6494f0SCatalin Marinas 	kfree(cpu_addr);
386ab6494f0SCatalin Marinas }
387ab6494f0SCatalin Marinas #endif	/* CONFIG_MMU */
3880ddbccd1SRussell King EXPORT_SYMBOL(dma_free_coherent);
3890ddbccd1SRussell King 
/*
 * Initialise the consistent memory allocation: pre-allocate kernel
 * page tables covering [CONSISTENT_BASE, CONSISTENT_END) and cache
 * the PTE table pointers in consistent_pte[].  Returns 0 on success
 * or -ENOMEM if table allocation fails.
 */
static int __init consistent_init(void)
{
	int ret = 0;
#ifdef CONFIG_MMU
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		/* nothing else should have mapped this region yet */
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);
#endif	/* !CONFIG_MMU */

	return ret;
}

core_initcall(consistent_init);
4290ddbccd1SRussell King 
4300ddbccd1SRussell King /*
4310ddbccd1SRussell King  * Make an area consistent for devices.
4320ddbccd1SRussell King  * Note: Drivers should NOT use this function directly, as it will break
4330ddbccd1SRussell King  * platforms with CONFIG_DMABOUNCE.
4340ddbccd1SRussell King  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
4350ddbccd1SRussell King  */
4360ddbccd1SRussell King void dma_cache_maint(const void *start, size_t size, int direction)
4370ddbccd1SRussell King {
4381522ac3eSRussell King 	void (*inner_op)(const void *, const void *);
4391522ac3eSRussell King 	void (*outer_op)(unsigned long, unsigned long);
4400ddbccd1SRussell King 
4411522ac3eSRussell King 	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
4420ddbccd1SRussell King 
4430ddbccd1SRussell King 	switch (direction) {
4440ddbccd1SRussell King 	case DMA_FROM_DEVICE:		/* invalidate only */
4451522ac3eSRussell King 		inner_op = dmac_inv_range;
4461522ac3eSRussell King 		outer_op = outer_inv_range;
4470ddbccd1SRussell King 		break;
4480ddbccd1SRussell King 	case DMA_TO_DEVICE:		/* writeback only */
4491522ac3eSRussell King 		inner_op = dmac_clean_range;
4501522ac3eSRussell King 		outer_op = outer_clean_range;
4510ddbccd1SRussell King 		break;
4520ddbccd1SRussell King 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
4531522ac3eSRussell King 		inner_op = dmac_flush_range;
4541522ac3eSRussell King 		outer_op = outer_flush_range;
4550ddbccd1SRussell King 		break;
4560ddbccd1SRussell King 	default:
4570ddbccd1SRussell King 		BUG();
4580ddbccd1SRussell King 	}
4591522ac3eSRussell King 
4601522ac3eSRussell King 	inner_op(start, start + size);
4611522ac3eSRussell King 	outer_op(__pa(start), __pa(start) + size);
4620ddbccd1SRussell King }
4630ddbccd1SRussell King EXPORT_SYMBOL(dma_cache_maint);
464afd1a321SRussell King 
/*
 * Perform inner and outer cache maintenance for a span that is
 * physically contiguous starting at page+offset, for the given DMA
 * direction.
 */
static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
				       size_t size, int direction)
{
	void *vaddr;
	unsigned long paddr;
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	if (!PageHighMem(page)) {
		/* lowmem: the permanent direct mapping is always usable */
		vaddr = page_address(page) + offset;
		inner_op(vaddr, vaddr + size);
	} else {
		/*
		 * kmap_high_get() only returns a mapping if the page is
		 * currently kmapped; if not, the inner op is skipped —
		 * presumably because an unmapped highmem page has no
		 * kernel-side cache alias to maintain (NOTE(review):
		 * confirm against kmap_high_get semantics).
		 */
		vaddr = kmap_high_get(page);
		if (vaddr) {
			vaddr += offset;
			inner_op(vaddr, vaddr + size);
			kunmap_high(page);
		}
	}

	/* the outer cache is physically indexed: always maintained */
	paddr = page_to_phys(page) + offset;
	outer_op(paddr, paddr + size);
}
50543377453SNicolas Pitre 
/*
 * Cache-maintain a buffer that may span multiple pages.  Lowmem runs
 * are handled in one call; highmem pages are processed one page at a
 * time since each needs its own kernel mapping.
 */
void dma_cache_maint_page(struct page *page, unsigned long offset,
			  size_t size, int dir)
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
			/* fold a multi-page offset into page + small offset */
			if (offset >= PAGE_SIZE) {
				page += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
			}
			/* clamp this chunk to the end of the current page */
			len = PAGE_SIZE - offset;
		}
		dma_cache_maint_contiguous(page, offset, len, dir);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
EXPORT_SYMBOL(dma_cache_maint_page);
53243377453SNicolas Pitre 
533afd1a321SRussell King /**
534afd1a321SRussell King  * dma_map_sg - map a set of SG buffers for streaming mode DMA
535afd1a321SRussell King  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
536afd1a321SRussell King  * @sg: list of buffers
537afd1a321SRussell King  * @nents: number of buffers to map
538afd1a321SRussell King  * @dir: DMA transfer direction
539afd1a321SRussell King  *
540afd1a321SRussell King  * Map a set of buffers described by scatterlist in streaming mode for DMA.
541afd1a321SRussell King  * This is the scatter-gather version of the dma_map_single interface.
542afd1a321SRussell King  * Here the scatter gather list elements are each tagged with the
543afd1a321SRussell King  * appropriate dma address and length.  They are obtained via
544afd1a321SRussell King  * sg_dma_{address,length}.
545afd1a321SRussell King  *
546afd1a321SRussell King  * Device ownership issues as mentioned for dma_map_single are the same
547afd1a321SRussell King  * here.
548afd1a321SRussell King  */
549afd1a321SRussell King int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
550afd1a321SRussell King 		enum dma_data_direction dir)
551afd1a321SRussell King {
552afd1a321SRussell King 	struct scatterlist *s;
55301135d92SRussell King 	int i, j;
554afd1a321SRussell King 
555afd1a321SRussell King 	for_each_sg(sg, s, nents, i) {
55601135d92SRussell King 		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
55701135d92SRussell King 						s->length, dir);
55801135d92SRussell King 		if (dma_mapping_error(dev, s->dma_address))
55901135d92SRussell King 			goto bad_mapping;
560afd1a321SRussell King 	}
561afd1a321SRussell King 	return nents;
56201135d92SRussell King 
56301135d92SRussell King  bad_mapping:
56401135d92SRussell King 	for_each_sg(sg, s, i, j)
56501135d92SRussell King 		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
56601135d92SRussell King 	return 0;
567afd1a321SRussell King }
568afd1a321SRussell King EXPORT_SYMBOL(dma_map_sg);
569afd1a321SRussell King 
570afd1a321SRussell King /**
571afd1a321SRussell King  * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
572afd1a321SRussell King  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
573afd1a321SRussell King  * @sg: list of buffers
574afd1a321SRussell King  * @nents: number of buffers to unmap (returned from dma_map_sg)
575afd1a321SRussell King  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
576afd1a321SRussell King  *
577afd1a321SRussell King  * Unmap a set of streaming mode DMA translations.  Again, CPU access
578afd1a321SRussell King  * rules concerning calls here are the same as for dma_unmap_single().
579afd1a321SRussell King  */
580afd1a321SRussell King void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
581afd1a321SRussell King 		enum dma_data_direction dir)
582afd1a321SRussell King {
58301135d92SRussell King 	struct scatterlist *s;
58401135d92SRussell King 	int i;
58501135d92SRussell King 
58601135d92SRussell King 	for_each_sg(sg, s, nents, i)
58701135d92SRussell King 		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
588afd1a321SRussell King }
589afd1a321SRussell King EXPORT_SYMBOL(dma_unmap_sg);
590afd1a321SRussell King 
591afd1a321SRussell King /**
592afd1a321SRussell King  * dma_sync_sg_for_cpu
593afd1a321SRussell King  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
594afd1a321SRussell King  * @sg: list of buffers
595afd1a321SRussell King  * @nents: number of buffers to map (returned from dma_map_sg)
596afd1a321SRussell King  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
597afd1a321SRussell King  */
598afd1a321SRussell King void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
599afd1a321SRussell King 			int nents, enum dma_data_direction dir)
600afd1a321SRussell King {
601afd1a321SRussell King 	struct scatterlist *s;
602afd1a321SRussell King 	int i;
603afd1a321SRussell King 
604afd1a321SRussell King 	for_each_sg(sg, s, nents, i) {
605309dbbabSRussell King 		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
606309dbbabSRussell King 					sg_dma_len(s), dir);
607afd1a321SRussell King 	}
608afd1a321SRussell King }
609afd1a321SRussell King EXPORT_SYMBOL(dma_sync_sg_for_cpu);
610afd1a321SRussell King 
611afd1a321SRussell King /**
612afd1a321SRussell King  * dma_sync_sg_for_device
613afd1a321SRussell King  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
614afd1a321SRussell King  * @sg: list of buffers
615afd1a321SRussell King  * @nents: number of buffers to map (returned from dma_map_sg)
616afd1a321SRussell King  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
617afd1a321SRussell King  */
618afd1a321SRussell King void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
619afd1a321SRussell King 			int nents, enum dma_data_direction dir)
620afd1a321SRussell King {
621afd1a321SRussell King 	struct scatterlist *s;
622afd1a321SRussell King 	int i;
623afd1a321SRussell King 
624afd1a321SRussell King 	for_each_sg(sg, s, nents, i) {
6252638b4dbSRussell King 		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
6262638b4dbSRussell King 					sg_dma_len(s), dir))
6272638b4dbSRussell King 			continue;
6282638b4dbSRussell King 
629afd1a321SRussell King 		if (!arch_is_coherent())
63043377453SNicolas Pitre 			dma_cache_maint_page(sg_page(s), s->offset,
63143377453SNicolas Pitre 					     s->length, dir);
632afd1a321SRussell King 	}
633afd1a321SRussell King }
634afd1a321SRussell King EXPORT_SYMBOL(dma_sync_sg_for_device);
635