xref: /openbmc/linux/kernel/dma/direct.c (revision 53809828)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif
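/*
 * For example, with the default of 24 bits DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)
 * is 0x00ffffff, i.e. the low 16 MiB of physical address space that legacy
 * ISA-style DMA engines can reach.
 */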

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (!dev->dma_mask) {
			dev_err(dev,
				"%s: call on device without dma_mask\n",
				caller);
			return false;
		}

		if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
				caller, &dma_addr, size,
				*dev->dma_mask, dev->bus_dma_mask);
		}
		return false;
	}
	return true;
}

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted())
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

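/*
 * Worked example (assuming a 1:1 phys/dma translation): with 9 GiB of RAM
 * the address of the highest page is just under 0x240000000, so fls64()
 * returns 34 and the expression below yields 2^34 - 1, i.e.
 * DMA_BIT_MASK(34), the smallest all-ones mask that covers every page.
 */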
u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
		max_dma = dev->bus_dma_mask;

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

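/*
 * For illustration, again assuming a 1:1 phys/dma translation and the
 * default ARCH_ZONE_DMA_BITS of 24: a 30-bit coherent mask gives a physical
 * mask of 0x3fffffff and thus GFP_DMA32, while a full 64-bit mask returns 0
 * and the allocation is served from the normal zones.
 */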
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted())
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

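/*
 * Check that the end of the candidate buffer is still reachable through both
 * the coherent DMA mask and, if one is set, the bus DMA mask.
 */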
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

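/*
 * Allocate a coherent buffer: try CMA first when the caller may sleep, then
 * fall back to the page allocator, retrying in progressively more restrictive
 * zones until dma_coherent_ok() is satisfied.  Under AMD SEV the pages are
 * additionally marked decrypted before the buffer is zeroed and handed out.
 */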
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	u64 phys_mask;
	void *ret;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
again:
	/* CMA can only be used in a context that permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;
	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

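/*
 * Sketch of the typical caller's view, assuming the device ends up using
 * dma_direct_ops (no IOMMU in the path):
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	// ... program "bus" into the device, access the buffer via "cpu" ...
 *	dma_free_coherent(dev, SZ_4K, cpu, bus);
 *
 * dma_alloc_coherent() then lands in dma_direct_alloc() below, which defers
 * to the architecture helpers for non-coherent devices.
 */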
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

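/*
 * Streaming-DMA cache maintenance: with the direct mapping the handle is
 * converted back to a physical address with dma_to_phys() and the
 * architecture hooks do the actual flush/invalidate work.  All of this is a
 * no-op for cache-coherent devices.
 */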
static void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dev_is_dma_coherent(dev))
		return;
	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

static void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dev_is_dma_coherent(dev))
		return;
	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
	arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
	arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif

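/*
 * Sketch of the streaming-mapping path as a driver sees it, again assuming
 * dma_direct_ops is in effect:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	// ... device reads "len" bytes at "addr" ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 *
 * dma_map_single() resolves to dma_direct_map_page() below; the mapping is a
 * plain phys_to_dma() translation plus a cache flush for the device.
 */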
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
	return dma_addr;
}

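/*
 * Each scatterlist segment is translated individually with phys_to_dma();
 * segments are never merged.  Per the DMA API contract, a return value of 0
 * means the whole scatterlist failed to map.
 */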
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
	return nents;
}

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
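/*
 * Worked example: on a configuration without ZONE_DMA, min_mask below is
 * 32 bits (less if there is under 4 GiB of RAM), so a driver that calls
 * dma_set_mask(dev, DMA_BIT_MASK(24)) is rejected here and has to fall back
 * to a wider mask or to bounce buffering.
 */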
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	return mask >= phys_to_dma(dev, min_mask);
}

int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

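/*
 * The default dma_map_ops for devices that sit behind no IOMMU or other
 * special mapping.  A device without dev->dma_ops gets whatever
 * get_arch_dma_ops() returns, which on several architectures is this table.
 */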
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.unmap_page		= dma_direct_unmap_page,
	.unmap_sg		= dma_direct_unmap_sg,
#endif
	.get_required_mask	= dma_direct_get_required_mask,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.cache_sync		= arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_direct_ops);