xref: /openbmc/linux/arch/arm64/mm/dma-mapping.c (revision f6723b56)
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

/*
 * Architecture-wide DMA operations, installed at boot by
 * arm64_swiotlb_init() below.
 */
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flags,
					  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	/* Devices limited to 32-bit DMA must be served from ZONE_DMA32. */
	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA32;
	/* Prefer the contiguous memory area (CMA) when it is configured. */
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		/* Otherwise fall back to the swiotlb coherent allocator. */
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	/* Buffers came either from CMA or from the swiotlb coherent pool. */
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		phys_addr_t paddr = dma_to_phys(dev, dma_handle);

		dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	} else {
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
	}
}

/* Default DMA operations, backed by swiotlb for streaming mappings. */
static struct dma_map_ops arm64_swiotlb_dma_ops = {
	.alloc = arm64_swiotlb_alloc_coherent,
	.free = arm64_swiotlb_free_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

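/*
 * Illustrative sketch, not part of the original file: an approximation of
 * how the generic DMA API reaches the ops table above. The real dispatch
 * lives in the dma-mapping headers; the helper name example_dispatch_alloc()
 * is hypothetical and only meant to show the indirection through dma_ops.
 */
static inline void *example_dispatch_alloc(struct device *dev, size_t size,
					   dma_addr_t *handle, gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);	/* typically the global dma_ops */

	/* ->alloc is arm64_swiotlb_alloc_coherent() once dma_ops is set. */
	return ops->alloc(dev, size, handle, gfp, NULL);
}
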
void __init arm64_swiotlb_init(void)
{
	dma_ops = &arm64_swiotlb_dma_ops;
	swiotlb_init(1);	/* 1 = verbose: report the swiotlb setup at boot */
}

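/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver-side allocation showing how a buffer obtained through the generic
 * API ends up in the callbacks above once arm64_swiotlb_init() has run.
 * example_setup_ring() and ring_dma are made-up names for the example only.
 */
static void *example_setup_ring(struct device *dev, dma_addr_t *ring_dma)
{
	/*
	 * dma_alloc_coherent() dispatches through dma_ops->alloc(), i.e.
	 * arm64_swiotlb_alloc_coherent(), honouring dev->coherent_dma_mask.
	 */
	return dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
}
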
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	/* Preallocate tracking entries for CONFIG_DMA_API_DEBUG. */
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);