xref: /openbmc/linux/kernel/dma/mapping.c (revision a1fd09e8e6ae35228ecc7c1e4bfff1fd725f78a0)
// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
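
/*
 * Illustrative sketch (not part of this file): typical use of the managed
 * API from a hypothetical driver's probe routine.  The device pointer and
 * the RING_SIZE constant below are assumptions for the example only.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dmam_alloc_attrs(&pdev->dev, RING_SIZE, &ring_dma,
 *				GFP_KERNEL, 0);
 *	if (!ring)
 *		return -ENOMEM;
 *
 * No explicit free is needed: the buffer is released through dmam_release()
 * when the driver detaches, unless dmam_free_coherent() is called earlier.
 */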

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
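
/*
 * Illustrative sketch (not part of this file): a streaming mapping of one
 * page for a device-bound transfer.  "page" and "len" are assumed to come
 * from the caller; most drivers use the dma_map_page()/dma_unmap_page()
 * wrappers, which pass attrs = 0 to the functions above.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	// ... hand "addr" to the hardware and wait for completion ...
 *	dma_unmap_page_attrs(dev, addr, len, DMA_TO_DEVICE, 0);
 */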

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
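
/*
 * Illustrative sketch (not part of this file): mapping a scatterlist and
 * checking the 0-on-error convention documented above.  "sgt" is assumed
 * to be an already populated struct sg_table.
 *
 *	int ents;
 *
 *	ents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
 *				DMA_TO_DEVICE, 0);
 *	if (ents == 0)
 *		return -EIO;
 *	// program the hardware with the "ents" coalesced entries ...
 *	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE, 0);
 */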

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist table.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
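
/*
 * Illustrative sketch (not part of this file): exporting a coherent buffer
 * to user space from a hypothetical driver's ->mmap() handler.  "foo_dev"
 * and its fields are assumptions for the example; callers should also gate
 * the feature on dma_can_mmap().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		if (!dma_can_mmap(foo->dev))
 *			return -ENXIO;
 *		return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *				      foo->dma_addr, foo->size, 0);
 *	}
 */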

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
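
/*
 * Illustrative sketch (not part of this file): drivers sometimes consult
 * the required mask to decide whether enabling 64-bit addressing (which may
 * cost extra descriptor space) is worthwhile on this system.  The
 * "use_64bit_descriptors" flag is a hypothetical driver variable.
 *
 *	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32))
 *		use_64bit_descriptors = true;
 */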

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	else if (ops->alloc_pages)
		page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
	else
		return NULL;

	debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);

	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	debug_dma_unmap_page(dev, dma_handle, size, dir);

	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *vaddr;

	if (!ops || !ops->alloc_noncoherent) {
		struct page *page;

		page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	size = PAGE_ALIGN(size);
	vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp);
	if (vaddr)
		debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir,
				   *dma_handle);
	return vaddr;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->free_noncoherent) {
		dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
		return;
	}

	size = PAGE_ALIGN(size);
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	ops->free_noncoherent(dev, size, vaddr, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncoherent);

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
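
/*
 * Illustrative sketch (not part of this file): the usual probe-time pattern
 * is to try a wide mask first and fall back to 32-bit.  The
 * dma_set_mask_and_coherent() helper sets both masks via the functions
 * above; the error handling shown is an assumption for the example.
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */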

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
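
/*
 * Illustrative sketch (not part of this file): callers that build large
 * transfers (e.g. storage drivers) can clamp their per-request size to what
 * the mapping layer supports; "max_bytes" is a hypothetical limit.
 *
 *	max_bytes = min_t(size_t, max_bytes, dma_max_mapping_size(dev));
 */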

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);