1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * arch-independent dma-mapping routines
4  *
5  * Copyright (c) 2006  SUSE Linux Products GmbH
6  * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
7  */
8 #include <linux/memblock.h> /* for max_pfn */
9 #include <linux/acpi.h>
10 #include <linux/dma-map-ops.h>
11 #include <linux/export.h>
12 #include <linux/gfp.h>
13 #include <linux/kmsan.h>
14 #include <linux/of_device.h>
15 #include <linux/slab.h>
16 #include <linux/vmalloc.h>
17 #include "debug.h"
18 #include "direct.h"
19 
20 bool dma_default_coherent;
21 
22 /*
23  * Managed DMA API
24  */
25 struct dma_devres {
26 	size_t		size;
27 	void		*vaddr;
28 	dma_addr_t	dma_handle;
29 	unsigned long	attrs;
30 };
31 
32 static void dmam_release(struct device *dev, void *res)
33 {
34 	struct dma_devres *this = res;
35 
36 	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
37 			this->attrs);
38 }
39 
40 static int dmam_match(struct device *dev, void *res, void *match_data)
41 {
42 	struct dma_devres *this = res, *match = match_data;
43 
44 	if (this->vaddr == match->vaddr) {
45 		WARN_ON(this->size != match->size ||
46 			this->dma_handle != match->dma_handle);
47 		return 1;
48 	}
49 	return 0;
50 }
51 
52 /**
53  * dmam_free_coherent - Managed dma_free_coherent()
54  * @dev: Device to free coherent memory for
55  * @size: Size of allocation
56  * @vaddr: Virtual address of the memory to free
57  * @dma_handle: DMA handle of the memory to free
58  *
59  * Managed dma_free_coherent().
60  */
61 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
62 			dma_addr_t dma_handle)
63 {
64 	struct dma_devres match_data = { size, vaddr, dma_handle };
65 
66 	dma_free_coherent(dev, size, vaddr, dma_handle);
67 	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
68 }
69 EXPORT_SYMBOL(dmam_free_coherent);
70 
71 /**
72  * dmam_alloc_attrs - Managed dma_alloc_attrs()
73  * @dev: Device to allocate memory for
74  * @size: Size of allocation
75  * @dma_handle: Out argument for allocated DMA handle
76  * @gfp: Allocation flags
77  * @attrs: Flags in the DMA_ATTR_* namespace.
78  *
79  * Managed dma_alloc_attrs().  Memory allocated using this function will be
80  * automatically released on driver detach.
81  *
82  * RETURNS:
83  * Pointer to allocated memory on success, NULL on failure.
84  */
85 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
86 		gfp_t gfp, unsigned long attrs)
87 {
88 	struct dma_devres *dr;
89 	void *vaddr;
90 
91 	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
92 	if (!dr)
93 		return NULL;
94 
95 	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
96 	if (!vaddr) {
97 		devres_free(dr);
98 		return NULL;
99 	}
100 
101 	dr->vaddr = vaddr;
102 	dr->dma_handle = *dma_handle;
103 	dr->size = size;
104 	dr->attrs = attrs;
105 
106 	devres_add(dev, dr);
107 
108 	return vaddr;
109 }
110 EXPORT_SYMBOL(dmam_alloc_attrs);
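
/*
 * Example (illustrative sketch only, not used by this file): a driver probe
 * routine might use the managed variant so the allocation is torn down
 * automatically when the driver is unbound; no explicit dmam_free_coherent()
 * call is needed.  The "foo" name and the SZ_4K size are made up.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t ring_dma;
 *		void *ring;
 *
 *		ring = dmam_alloc_attrs(&pdev->dev, SZ_4K, &ring_dma,
 *					GFP_KERNEL, 0);
 *		if (!ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */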
111 
112 static bool dma_go_direct(struct device *dev, dma_addr_t mask,
113 		const struct dma_map_ops *ops)
114 {
115 	if (likely(!ops))
116 		return true;
117 #ifdef CONFIG_DMA_OPS_BYPASS
118 	if (dev->dma_ops_bypass)
119 		return min_not_zero(mask, dev->bus_dma_limit) >=
120 			    dma_direct_get_required_mask(dev);
121 #endif
122 	return false;
123 }
124 
125 
126 /*
127  * Check if the device uses a direct mapping for streaming DMA operations.
128  * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
129  * enough.
130  */
131 static inline bool dma_alloc_direct(struct device *dev,
132 		const struct dma_map_ops *ops)
133 {
134 	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
135 }
136 
137 static inline bool dma_map_direct(struct device *dev,
138 		const struct dma_map_ops *ops)
139 {
140 	return dma_go_direct(dev, *dev->dma_mask, ops);
141 }
142 
143 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
144 		size_t offset, size_t size, enum dma_data_direction dir,
145 		unsigned long attrs)
146 {
147 	const struct dma_map_ops *ops = get_dma_ops(dev);
148 	dma_addr_t addr;
149 
150 	BUG_ON(!valid_dma_direction(dir));
151 
152 	if (WARN_ON_ONCE(!dev->dma_mask))
153 		return DMA_MAPPING_ERROR;
154 
155 	if (dma_map_direct(dev, ops) ||
156 	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
157 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
158 	else
159 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
160 	kmsan_handle_dma(page, offset, size, dir);
161 	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
162 
163 	return addr;
164 }
165 EXPORT_SYMBOL(dma_map_page_attrs);
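
/*
 * Example (illustrative sketch only): typical streaming use maps a page,
 * checks for a mapping failure with dma_mapping_error(), and unmaps with the
 * same size and direction.  "dev" and "page" are assumed driver context.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...	hand "dma" to the device and wait for completion
 *	dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_TO_DEVICE, 0);
 */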
166 
167 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
168 		enum dma_data_direction dir, unsigned long attrs)
169 {
170 	const struct dma_map_ops *ops = get_dma_ops(dev);
171 
172 	BUG_ON(!valid_dma_direction(dir));
173 	if (dma_map_direct(dev, ops) ||
174 	    arch_dma_unmap_page_direct(dev, addr + size))
175 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
176 	else if (ops->unmap_page)
177 		ops->unmap_page(dev, addr, size, dir, attrs);
178 	debug_dma_unmap_page(dev, addr, size, dir);
179 }
180 EXPORT_SYMBOL(dma_unmap_page_attrs);
181 
182 static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
183 	 int nents, enum dma_data_direction dir, unsigned long attrs)
184 {
185 	const struct dma_map_ops *ops = get_dma_ops(dev);
186 	int ents;
187 
188 	BUG_ON(!valid_dma_direction(dir));
189 
190 	if (WARN_ON_ONCE(!dev->dma_mask))
191 		return 0;
192 
193 	if (dma_map_direct(dev, ops) ||
194 	    arch_dma_map_sg_direct(dev, sg, nents))
195 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
196 	else
197 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
198 
199 	if (ents > 0) {
200 		kmsan_handle_dma_sg(sg, nents, dir);
201 		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
202 	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
203 				ents != -EIO && ents != -EREMOTEIO)) {
204 		return -EIO;
205 	}
206 
207 	return ents;
208 }
209 
210 /**
211  * dma_map_sg_attrs - Map the given buffer for DMA
212  * @dev:	The device for which to perform the DMA operation
213  * @sg:		The scatterlist describing the buffer
214  * @nents:	Number of entries to map
215  * @dir:	DMA direction
216  * @attrs:	Optional DMA attributes for the map operation
217  *
218  * Maps a buffer described by a scatterlist passed in the sg argument with
219  * nents segments for the @dir DMA operation by the @dev device.
220  *
221  * Returns the number of mapped entries (which can be less than nents)
222  * on success. Zero is returned for any error.
223  *
224  * dma_unmap_sg_attrs() should be used to unmap the buffer with the
225  * original sg and original nents (not the value returned by this function).
226  */
227 unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
228 		    int nents, enum dma_data_direction dir, unsigned long attrs)
229 {
230 	int ret;
231 
232 	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
233 	if (ret < 0)
234 		return 0;
235 	return ret;
236 }
237 EXPORT_SYMBOL(dma_map_sg_attrs);
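
/*
 * Example (illustrative sketch only): the device is programmed with the
 * returned number of entries, while dma_unmap_sg_attrs() is called with the
 * original nents.  "dev", "sgl" and "nents" are assumed driver context.
 *
 *	struct scatterlist *sg;
 *	int mapped, i;
 *
 *	mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i)
 *		...	use sg_dma_address(sg) and sg_dma_len(sg)
 *	dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 */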
238 
239 /**
240  * dma_map_sgtable - Map the given buffer for DMA
241  * @dev:	The device for which to perform the DMA operation
242  * @sgt:	The sg_table object describing the buffer
243  * @dir:	DMA direction
244  * @attrs:	Optional DMA attributes for the map operation
245  *
246  * Maps a buffer described by a scatterlist stored in the given sg_table
247  * object for the @dir DMA operation by the @dev device. After a successful
248  * mapping, ownership of the buffer is transferred to the DMA domain.  One
249  * has to call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move
250  * ownership of the buffer back to the CPU domain before the CPU touches
251  * the buffer.
252  *
253  * Returns 0 on success or a negative error code on error. The following
254  * error codes are supported with the given meaning:
255  *
256  *   -EINVAL		An invalid argument, unaligned access or other error
257  *			in usage. Will not succeed if retried.
258  *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
259  *			complete the mapping. Should succeed if retried later.
260  *   -EIO		Legacy error code with an unknown meaning, e.g. this is
261  *			returned if a lower level call returned
262  *			DMA_MAPPING_ERROR.
263  *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
264  *			in the sg_table. This will not succeed if retried.
265  */
266 int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
267 		    enum dma_data_direction dir, unsigned long attrs)
268 {
269 	int nents;
270 
271 	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
272 	if (nents < 0)
273 		return nents;
274 	sgt->nents = nents;
275 	return 0;
276 }
277 EXPORT_SYMBOL_GPL(dma_map_sgtable);
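
/*
 * Example (illustrative sketch only): the sg_table variant returns an errno
 * and records the mapped entry count in sgt->nents, so iteration can use the
 * for_each_sgtable_dma_sg() helper.  "dev" and "sgt" are assumed driver
 * context.
 *
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		...	use sg_dma_address(sg) and sg_dma_len(sg)
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */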
278 
279 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
280 				      int nents, enum dma_data_direction dir,
281 				      unsigned long attrs)
282 {
283 	const struct dma_map_ops *ops = get_dma_ops(dev);
284 
285 	BUG_ON(!valid_dma_direction(dir));
286 	debug_dma_unmap_sg(dev, sg, nents, dir);
287 	if (dma_map_direct(dev, ops) ||
288 	    arch_dma_unmap_sg_direct(dev, sg, nents))
289 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
290 	else if (ops->unmap_sg)
291 		ops->unmap_sg(dev, sg, nents, dir, attrs);
292 }
293 EXPORT_SYMBOL(dma_unmap_sg_attrs);
294 
295 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
296 		size_t size, enum dma_data_direction dir, unsigned long attrs)
297 {
298 	const struct dma_map_ops *ops = get_dma_ops(dev);
299 	dma_addr_t addr = DMA_MAPPING_ERROR;
300 
301 	BUG_ON(!valid_dma_direction(dir));
302 
303 	if (WARN_ON_ONCE(!dev->dma_mask))
304 		return DMA_MAPPING_ERROR;
305 
306 	if (dma_map_direct(dev, ops))
307 		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
308 	else if (ops->map_resource)
309 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
310 
311 	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
312 	return addr;
313 }
314 EXPORT_SYMBOL(dma_map_resource);
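
/*
 * Example (illustrative sketch only): dma_map_resource() is typically used to
 * make a physical MMIO region, such as another device's FIFO register,
 * addressable by a DMA engine.  "dma_dev" and "fifo_phys" are assumed driver
 * context.
 *
 *	dma_addr_t fifo_dma;
 *
 *	fifo_dma = dma_map_resource(dma_dev, fifo_phys, 4, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dma_dev, fifo_dma))
 *		return -ENOMEM;
 *	...	program the DMA engine to write to fifo_dma
 *	dma_unmap_resource(dma_dev, fifo_dma, 4, DMA_TO_DEVICE, 0);
 */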
315 
316 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
317 		enum dma_data_direction dir, unsigned long attrs)
318 {
319 	const struct dma_map_ops *ops = get_dma_ops(dev);
320 
321 	BUG_ON(!valid_dma_direction(dir));
322 	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
323 		ops->unmap_resource(dev, addr, size, dir, attrs);
324 	debug_dma_unmap_resource(dev, addr, size, dir);
325 }
326 EXPORT_SYMBOL(dma_unmap_resource);
327 
328 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
329 		enum dma_data_direction dir)
330 {
331 	const struct dma_map_ops *ops = get_dma_ops(dev);
332 
333 	BUG_ON(!valid_dma_direction(dir));
334 	if (dma_map_direct(dev, ops))
335 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
336 	else if (ops->sync_single_for_cpu)
337 		ops->sync_single_for_cpu(dev, addr, size, dir);
338 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
339 }
340 EXPORT_SYMBOL(dma_sync_single_for_cpu);
341 
342 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
343 		size_t size, enum dma_data_direction dir)
344 {
345 	const struct dma_map_ops *ops = get_dma_ops(dev);
346 
347 	BUG_ON(!valid_dma_direction(dir));
348 	if (dma_map_direct(dev, ops))
349 		dma_direct_sync_single_for_device(dev, addr, size, dir);
350 	else if (ops->sync_single_for_device)
351 		ops->sync_single_for_device(dev, addr, size, dir);
352 	debug_dma_sync_single_for_device(dev, addr, size, dir);
353 }
354 EXPORT_SYMBOL(dma_sync_single_for_device);
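
/*
 * Example (illustrative sketch only): for a long-lived streaming mapping,
 * ownership bounces between CPU and device using the sync calls instead of
 * re-mapping the buffer for every transfer.  "dev", "buf" and "len" are
 * assumed driver context.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...	device writes into the buffer
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	...	CPU may inspect buf here
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	...	device may write again using the same mapping
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */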
355 
356 void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
357 		    int nelems, enum dma_data_direction dir)
358 {
359 	const struct dma_map_ops *ops = get_dma_ops(dev);
360 
361 	BUG_ON(!valid_dma_direction(dir));
362 	if (dma_map_direct(dev, ops))
363 		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
364 	else if (ops->sync_sg_for_cpu)
365 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
366 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
367 }
368 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
369 
370 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
371 		       int nelems, enum dma_data_direction dir)
372 {
373 	const struct dma_map_ops *ops = get_dma_ops(dev);
374 
375 	BUG_ON(!valid_dma_direction(dir));
376 	if (dma_map_direct(dev, ops))
377 		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
378 	else if (ops->sync_sg_for_device)
379 		ops->sync_sg_for_device(dev, sg, nelems, dir);
380 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
381 }
382 EXPORT_SYMBOL(dma_sync_sg_for_device);
383 
384 /*
385  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
386  * that the intention is to allow exporting memory allocated via the
387  * coherent DMA APIs through the dma_buf API, which only accepts a
388  * scatterlist.  This presents a couple of problems:
389  * 1. Not all memory allocated via the coherent DMA APIs is backed by
390  *    a struct page
391  * 2. Passing coherent DMA memory into the streaming APIs is not allowed,
392  *    as we would try to flush the memory through a different alias than
393  *    the one actually being used (and the flushes are redundant).
394  */
395 int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
396 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
397 		unsigned long attrs)
398 {
399 	const struct dma_map_ops *ops = get_dma_ops(dev);
400 
401 	if (dma_alloc_direct(dev, ops))
402 		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
403 				size, attrs);
404 	if (!ops->get_sgtable)
405 		return -ENXIO;
406 	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
407 }
408 EXPORT_SYMBOL(dma_get_sgtable_attrs);
409 
410 #ifdef CONFIG_MMU
411 /*
412  * Return the page attributes used for mapping dma_alloc_* memory, either in
413  * kernel space if remapping is needed, or to userspace through dma_mmap_*.
414  */
415 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
416 {
417 	if (dev_is_dma_coherent(dev))
418 		return prot;
419 #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
420 	if (attrs & DMA_ATTR_WRITE_COMBINE)
421 		return pgprot_writecombine(prot);
422 #endif
423 	return pgprot_dmacoherent(prot);
424 }
425 #endif /* CONFIG_MMU */
426 
427 /**
428  * dma_can_mmap - check if a given device supports dma_mmap_*
429  * @dev: device to check
430  *
431  * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
432  * map DMA allocations to userspace.
433  */
434 bool dma_can_mmap(struct device *dev)
435 {
436 	const struct dma_map_ops *ops = get_dma_ops(dev);
437 
438 	if (dma_alloc_direct(dev, ops))
439 		return dma_direct_can_mmap(dev);
440 	return ops->mmap != NULL;
441 }
442 EXPORT_SYMBOL_GPL(dma_can_mmap);
443 
444 /**
445  * dma_mmap_attrs - map a coherent DMA allocation into user space
446  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
447  * @vma: vm_area_struct describing requested user mapping
448  * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
449  * @dma_addr: device-view address returned from dma_alloc_attrs
450  * @size: size of memory originally requested in dma_alloc_attrs
451  * @attrs: attributes of mapping properties requested in dma_alloc_attrs
452  *
453  * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
454  * space.  The coherent DMA buffer must not be freed by the driver until the
455  * user space mapping has been released.
456  */
457 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
458 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
459 		unsigned long attrs)
460 {
461 	const struct dma_map_ops *ops = get_dma_ops(dev);
462 
463 	if (dma_alloc_direct(dev, ops))
464 		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
465 				attrs);
466 	if (!ops->mmap)
467 		return -ENXIO;
468 	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
469 }
470 EXPORT_SYMBOL(dma_mmap_attrs);
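
/*
 * Example (illustrative sketch only): a character device could implement its
 * mmap file operation on top of dma_mmap_attrs(), after checking
 * dma_can_mmap().  The "foo" names are made up; priv is assumed to hold the
 * device pointer and a buffer obtained from dma_alloc_attrs().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		if (!dma_can_mmap(priv->dev))
 *			return -ENXIO;
 *		return dma_mmap_attrs(priv->dev, vma, priv->cpu_addr,
 *				      priv->dma_addr, priv->size, 0);
 *	}
 */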
471 
472 u64 dma_get_required_mask(struct device *dev)
473 {
474 	const struct dma_map_ops *ops = get_dma_ops(dev);
475 
476 	if (dma_alloc_direct(dev, ops))
477 		return dma_direct_get_required_mask(dev);
478 	if (ops->get_required_mask)
479 		return ops->get_required_mask(dev);
480 
481 	/*
482 	 * We require every DMA ops implementation to at least support a 32-bit
483 	 * DMA mask (and use bounce buffering if that isn't supported in
484 	 * hardware).  As the direct mapping code has its own routine to
485  * actually report an optimal mask, we default to 32-bit here, as that
486 	 * is the right thing for most IOMMUs, and at least not actively
487 	 * harmful in general.
488 	 */
489 	return DMA_BIT_MASK(32);
490 }
491 EXPORT_SYMBOL_GPL(dma_get_required_mask);
492 
493 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
494 		gfp_t flag, unsigned long attrs)
495 {
496 	const struct dma_map_ops *ops = get_dma_ops(dev);
497 	void *cpu_addr;
498 
499 	WARN_ON_ONCE(!dev->coherent_dma_mask);
500 
501 	/*
502 	 * DMA allocations can never be turned back into a page pointer, so
503 	 * requesting compound pages doesn't make sense (and can't even be
504 	 * supported at all by various backends).
505 	 */
506 	if (WARN_ON_ONCE(flag & __GFP_COMP))
507 		return NULL;
508 
509 	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
510 		return cpu_addr;
511 
512 	/* let the implementation decide on the zone to allocate from: */
513 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
514 
515 	if (dma_alloc_direct(dev, ops))
516 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
517 	else if (ops->alloc)
518 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
519 	else
520 		return NULL;
521 
522 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
523 	return cpu_addr;
524 }
525 EXPORT_SYMBOL(dma_alloc_attrs);
526 
527 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
528 		dma_addr_t dma_handle, unsigned long attrs)
529 {
530 	const struct dma_map_ops *ops = get_dma_ops(dev);
531 
532 	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
533 		return;
534 	/*
535 	 * On non-coherent platforms which implement DMA-coherent buffers via
536 	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
537 	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
538 	 * sleep on some machines, and b) an indication that the driver is
539 	 * probably misusing the coherent API anyway.
540 	 */
541 	WARN_ON(irqs_disabled());
542 
543 	if (!cpu_addr)
544 		return;
545 
546 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
547 	if (dma_alloc_direct(dev, ops))
548 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
549 	else if (ops->free)
550 		ops->free(dev, size, cpu_addr, dma_handle, attrs);
551 }
552 EXPORT_SYMBOL(dma_free_attrs);
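
/*
 * Example (illustrative sketch only): a matched alloc/free pair from process
 * context.  No sync calls are needed for coherent allocations; "dev" and
 * "size" are assumed driver context.
 *
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	cpu = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, 0);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...	both the CPU and the device may access the buffer
 *	dma_free_attrs(dev, size, cpu, dma, 0);
 */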
553 
554 static struct page *__dma_alloc_pages(struct device *dev, size_t size,
555 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
556 {
557 	const struct dma_map_ops *ops = get_dma_ops(dev);
558 
559 	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
560 		return NULL;
561 	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
562 		return NULL;
563 	if (WARN_ON_ONCE(gfp & __GFP_COMP))
564 		return NULL;
565 
566 	size = PAGE_ALIGN(size);
567 	if (dma_alloc_direct(dev, ops))
568 		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
569 	if (!ops->alloc_pages)
570 		return NULL;
571 	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
572 }
573 
574 struct page *dma_alloc_pages(struct device *dev, size_t size,
575 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
576 {
577 	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
578 
579 	if (page)
580 		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
581 	return page;
582 }
583 EXPORT_SYMBOL_GPL(dma_alloc_pages);
584 
585 static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
586 		dma_addr_t dma_handle, enum dma_data_direction dir)
587 {
588 	const struct dma_map_ops *ops = get_dma_ops(dev);
589 
590 	size = PAGE_ALIGN(size);
591 	if (dma_alloc_direct(dev, ops))
592 		dma_direct_free_pages(dev, size, page, dma_handle, dir);
593 	else if (ops->free_pages)
594 		ops->free_pages(dev, size, page, dma_handle, dir);
595 }
596 
597 void dma_free_pages(struct device *dev, size_t size, struct page *page,
598 		dma_addr_t dma_handle, enum dma_data_direction dir)
599 {
600 	debug_dma_unmap_page(dev, dma_handle, size, dir);
601 	__dma_free_pages(dev, size, page, dma_handle, dir);
602 }
603 EXPORT_SYMBOL_GPL(dma_free_pages);
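
/*
 * Example (illustrative sketch only): dma_alloc_pages() returns non-coherent,
 * page-backed memory; the caller is expected to bracket device accesses with
 * dma_sync_single_for_{device,cpu}().  "dev" and "size" are assumed driver
 * context.
 *
 *	dma_addr_t dma;
 *	struct page *page;
 *
 *	page = dma_alloc_pages(dev, size, &dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...	fill page_address(page), then dma_sync_single_for_device()
 *	...	after the device writes, dma_sync_single_for_cpu()
 *	dma_free_pages(dev, size, page, dma, DMA_BIDIRECTIONAL);
 */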
604 
605 int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
606 		size_t size, struct page *page)
607 {
608 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
609 
610 	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
611 		return -ENXIO;
612 	return remap_pfn_range(vma, vma->vm_start,
613 			       page_to_pfn(page) + vma->vm_pgoff,
614 			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
615 }
616 EXPORT_SYMBOL_GPL(dma_mmap_pages);
617 
618 static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
619 		enum dma_data_direction dir, gfp_t gfp)
620 {
621 	struct sg_table *sgt;
622 	struct page *page;
623 
624 	sgt = kmalloc(sizeof(*sgt), gfp);
625 	if (!sgt)
626 		return NULL;
627 	if (sg_alloc_table(sgt, 1, gfp))
628 		goto out_free_sgt;
629 	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
630 	if (!page)
631 		goto out_free_table;
632 	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
633 	sg_dma_len(sgt->sgl) = sgt->sgl->length;
634 	return sgt;
635 out_free_table:
636 	sg_free_table(sgt);
637 out_free_sgt:
638 	kfree(sgt);
639 	return NULL;
640 }
641 
642 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
643 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
644 {
645 	const struct dma_map_ops *ops = get_dma_ops(dev);
646 	struct sg_table *sgt;
647 
648 	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
649 		return NULL;
650 	if (WARN_ON_ONCE(gfp & __GFP_COMP))
651 		return NULL;
652 
653 	if (ops && ops->alloc_noncontiguous)
654 		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
655 	else
656 		sgt = alloc_single_sgt(dev, size, dir, gfp);
657 
658 	if (sgt) {
659 		sgt->nents = 1;
660 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
661 	}
662 	return sgt;
663 }
664 EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
665 
666 static void free_single_sgt(struct device *dev, size_t size,
667 		struct sg_table *sgt, enum dma_data_direction dir)
668 {
669 	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
670 			 dir);
671 	sg_free_table(sgt);
672 	kfree(sgt);
673 }
674 
675 void dma_free_noncontiguous(struct device *dev, size_t size,
676 		struct sg_table *sgt, enum dma_data_direction dir)
677 {
678 	const struct dma_map_ops *ops = get_dma_ops(dev);
679 
680 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
681 	if (ops && ops->free_noncontiguous)
682 		ops->free_noncontiguous(dev, size, sgt, dir);
683 	else
684 		free_single_sgt(dev, size, sgt, dir);
685 }
686 EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
687 
688 void *dma_vmap_noncontiguous(struct device *dev, size_t size,
689 		struct sg_table *sgt)
690 {
691 	const struct dma_map_ops *ops = get_dma_ops(dev);
692 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
693 
694 	if (ops && ops->alloc_noncontiguous)
695 		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
696 	return page_address(sg_page(sgt->sgl));
697 }
698 EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
699 
700 void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
701 {
702 	const struct dma_map_ops *ops = get_dma_ops(dev);
703 
704 	if (ops && ops->alloc_noncontiguous)
705 		vunmap(vaddr);
706 }
707 EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
708 
709 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
710 		size_t size, struct sg_table *sgt)
711 {
712 	const struct dma_map_ops *ops = get_dma_ops(dev);
713 
714 	if (ops && ops->alloc_noncontiguous) {
715 		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
716 
717 		if (vma->vm_pgoff >= count ||
718 		    vma_pages(vma) > count - vma->vm_pgoff)
719 			return -ENXIO;
720 		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
721 	}
722 	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
723 }
724 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
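
/*
 * Example (illustrative sketch only): the noncontiguous API family is usually
 * used together - allocate, optionally vmap for a kernel view, then tear down
 * in reverse order.  "dev" and "size" are assumed driver context; device
 * accesses are bracketed with dma_sync_sgtable_for_{cpu,device}().
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr) {
 *		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 *		return -ENOMEM;
 *	}
 *	...	CPU uses vaddr, the device uses the entries in sgt
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */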
725 
726 static int dma_supported(struct device *dev, u64 mask)
727 {
728 	const struct dma_map_ops *ops = get_dma_ops(dev);
729 
730 	/*
731 	 * ->dma_supported sets the bypass flag, so we must always call
732 	 * into the method here unless the device is truly direct mapped.
733 	 */
734 	if (!ops)
735 		return dma_direct_supported(dev, mask);
736 	if (!ops->dma_supported)
737 		return 1;
738 	return ops->dma_supported(dev, mask);
739 }
740 
741 bool dma_pci_p2pdma_supported(struct device *dev)
742 {
743 	const struct dma_map_ops *ops = get_dma_ops(dev);
744 
745 	/* If ops is not set, dma-direct will be used, which supports P2PDMA */
746 	if (!ops)
747 		return true;
748 
749 	/*
750 	 * Note: dma_ops_bypass is not checked here because P2PDMA should
751 	 * not be used with DMA mapping ops that do not support it, even
752 	 * if the specific device is bypassing them.
753 	 */
754 
755 	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
756 }
757 EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
758 
759 #ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
760 void arch_dma_set_mask(struct device *dev, u64 mask);
761 #else
762 #define arch_dma_set_mask(dev, mask)	do { } while (0)
763 #endif
764 
765 int dma_set_mask(struct device *dev, u64 mask)
766 {
767 	/*
768 	 * Truncate the mask to the actually supported dma_addr_t width to
769 	 * avoid generating unsupportable addresses.
770 	 */
771 	mask = (dma_addr_t)mask;
772 
773 	if (!dev->dma_mask || !dma_supported(dev, mask))
774 		return -EIO;
775 
776 	arch_dma_set_mask(dev, mask);
777 	*dev->dma_mask = mask;
778 	return 0;
779 }
780 EXPORT_SYMBOL(dma_set_mask);
781 
782 int dma_set_coherent_mask(struct device *dev, u64 mask)
783 {
784 	/*
785 	 * Truncate the mask to the actually supported dma_addr_t width to
786 	 * avoid generating unsupportable addresses.
787 	 */
788 	mask = (dma_addr_t)mask;
789 
790 	if (!dma_supported(dev, mask))
791 		return -EIO;
792 
793 	dev->coherent_dma_mask = mask;
794 	return 0;
795 }
796 EXPORT_SYMBOL(dma_set_coherent_mask);
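
/*
 * Example (illustrative sketch only): drivers usually set both the streaming
 * and the coherent mask at probe time via the dma_set_mask_and_coherent()
 * helper, which is built on top of dma_set_mask() and
 * dma_set_coherent_mask().  The 48-bit mask below is just a stand-in for the
 * device's real addressing capability.
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
 *	if (ret)
 *		return ret;
 */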
797 
798 size_t dma_max_mapping_size(struct device *dev)
799 {
800 	const struct dma_map_ops *ops = get_dma_ops(dev);
801 	size_t size = SIZE_MAX;
802 
803 	if (dma_map_direct(dev, ops))
804 		size = dma_direct_max_mapping_size(dev);
805 	else if (ops && ops->max_mapping_size)
806 		size = ops->max_mapping_size(dev);
807 
808 	return size;
809 }
810 EXPORT_SYMBOL_GPL(dma_max_mapping_size);
811 
812 size_t dma_opt_mapping_size(struct device *dev)
813 {
814 	const struct dma_map_ops *ops = get_dma_ops(dev);
815 	size_t size = SIZE_MAX;
816 
817 	if (ops && ops->opt_mapping_size)
818 		size = ops->opt_mapping_size();
819 
820 	return min(dma_max_mapping_size(dev), size);
821 }
822 EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
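
/*
 * Example (illustrative sketch only): block/SCSI style code can use the
 * optimal mapping size to cap per-request sizes; this mirrors the kind of
 * clamping done by upper layers.  "shost" is an assumed SCSI host context.
 *
 *	shost->max_sectors = min_t(unsigned int, shost->max_sectors,
 *				   dma_opt_mapping_size(dev) >> SECTOR_SHIFT);
 */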
823 
824 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
825 {
826 	const struct dma_map_ops *ops = get_dma_ops(dev);
827 
828 	if (dma_map_direct(dev, ops))
829 		return dma_direct_need_sync(dev, dma_addr);
830 	return ops->sync_single_for_cpu || ops->sync_single_for_device;
831 }
832 EXPORT_SYMBOL_GPL(dma_need_sync);
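
/*
 * Example (illustrative sketch only): fast paths can cache the result of
 * dma_need_sync() so they skip the sync calls entirely when the mapping needs
 * no maintenance.  "pool" is a made-up structure for illustration.
 *
 *	pool->dma_need_sync = dma_need_sync(dev, dma);
 *	...
 *	if (pool->dma_need_sync)
 *		dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */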
833 
834 unsigned long dma_get_merge_boundary(struct device *dev)
835 {
836 	const struct dma_map_ops *ops = get_dma_ops(dev);
837 
838 	if (!ops || !ops->get_merge_boundary)
839 		return 0;	/* can't merge */
840 
841 	return ops->get_merge_boundary(dev);
842 }
843 EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
844