xref: /openbmc/linux/kernel/dma/mapping.c (revision a229cc14f3395311b899e5e582b71efa8dd01df0)
1cf65a0f6SChristoph Hellwig // SPDX-License-Identifier: GPL-2.0
2cf65a0f6SChristoph Hellwig /*
3cf65a0f6SChristoph Hellwig  * arch-independent dma-mapping routines
4cf65a0f6SChristoph Hellwig  *
5cf65a0f6SChristoph Hellwig  * Copyright (c) 2006  SUSE Linux Products GmbH
6cf65a0f6SChristoph Hellwig  * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
7cf65a0f6SChristoph Hellwig  */
805887cb6SChristoph Hellwig #include <linux/memblock.h> /* for max_pfn */
9cf65a0f6SChristoph Hellwig #include <linux/acpi.h>
100a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
11cf65a0f6SChristoph Hellwig #include <linux/export.h>
12cf65a0f6SChristoph Hellwig #include <linux/gfp.h>
13cf65a0f6SChristoph Hellwig #include <linux/of_device.h>
14cf65a0f6SChristoph Hellwig #include <linux/slab.h>
15cf65a0f6SChristoph Hellwig #include <linux/vmalloc.h>
16a1fd09e8SChristoph Hellwig #include "debug.h"
1719c65c3dSChristoph Hellwig #include "direct.h"
18cf65a0f6SChristoph Hellwig 
196d4e9a8eSChristoph Hellwig bool dma_default_coherent;
206d4e9a8eSChristoph Hellwig 
21cf65a0f6SChristoph Hellwig /*
22cf65a0f6SChristoph Hellwig  * Managed DMA API
23cf65a0f6SChristoph Hellwig  */
24cf65a0f6SChristoph Hellwig struct dma_devres {
25cf65a0f6SChristoph Hellwig 	size_t		size;
26cf65a0f6SChristoph Hellwig 	void		*vaddr;
27cf65a0f6SChristoph Hellwig 	dma_addr_t	dma_handle;
28cf65a0f6SChristoph Hellwig 	unsigned long	attrs;
29cf65a0f6SChristoph Hellwig };
30cf65a0f6SChristoph Hellwig 
31cf65a0f6SChristoph Hellwig static void dmam_release(struct device *dev, void *res)
32cf65a0f6SChristoph Hellwig {
33cf65a0f6SChristoph Hellwig 	struct dma_devres *this = res;
34cf65a0f6SChristoph Hellwig 
35cf65a0f6SChristoph Hellwig 	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
36cf65a0f6SChristoph Hellwig 			this->attrs);
37cf65a0f6SChristoph Hellwig }
38cf65a0f6SChristoph Hellwig 
39cf65a0f6SChristoph Hellwig static int dmam_match(struct device *dev, void *res, void *match_data)
40cf65a0f6SChristoph Hellwig {
41cf65a0f6SChristoph Hellwig 	struct dma_devres *this = res, *match = match_data;
42cf65a0f6SChristoph Hellwig 
43cf65a0f6SChristoph Hellwig 	if (this->vaddr == match->vaddr) {
44cf65a0f6SChristoph Hellwig 		WARN_ON(this->size != match->size ||
45cf65a0f6SChristoph Hellwig 			this->dma_handle != match->dma_handle);
46cf65a0f6SChristoph Hellwig 		return 1;
47cf65a0f6SChristoph Hellwig 	}
48cf65a0f6SChristoph Hellwig 	return 0;
49cf65a0f6SChristoph Hellwig }
50cf65a0f6SChristoph Hellwig 
51cf65a0f6SChristoph Hellwig /**
52cf65a0f6SChristoph Hellwig  * dmam_free_coherent - Managed dma_free_coherent()
53cf65a0f6SChristoph Hellwig  * @dev: Device to free coherent memory for
54cf65a0f6SChristoph Hellwig  * @size: Size of allocation
55cf65a0f6SChristoph Hellwig  * @vaddr: Virtual address of the memory to free
56cf65a0f6SChristoph Hellwig  * @dma_handle: DMA handle of the memory to free
57cf65a0f6SChristoph Hellwig  *
58cf65a0f6SChristoph Hellwig  * Managed dma_free_coherent().
59cf65a0f6SChristoph Hellwig  */
60cf65a0f6SChristoph Hellwig void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
61cf65a0f6SChristoph Hellwig 			dma_addr_t dma_handle)
62cf65a0f6SChristoph Hellwig {
63cf65a0f6SChristoph Hellwig 	struct dma_devres match_data = { size, vaddr, dma_handle };
64cf65a0f6SChristoph Hellwig 
65cf65a0f6SChristoph Hellwig 	dma_free_coherent(dev, size, vaddr, dma_handle);
66cf65a0f6SChristoph Hellwig 	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
67cf65a0f6SChristoph Hellwig }
68cf65a0f6SChristoph Hellwig EXPORT_SYMBOL(dmam_free_coherent);
69cf65a0f6SChristoph Hellwig 
70cf65a0f6SChristoph Hellwig /**
71cf65a0f6SChristoph Hellwig  * dmam_alloc_attrs - Managed dma_alloc_attrs()
72cf65a0f6SChristoph Hellwig  * @dev: Device to allocate memory for
73cf65a0f6SChristoph Hellwig  * @size: Size of allocation
74cf65a0f6SChristoph Hellwig  * @dma_handle: Out argument for allocated DMA handle
75cf65a0f6SChristoph Hellwig  * @gfp: Allocation flags
76cf65a0f6SChristoph Hellwig  * @attrs: Flags in the DMA_ATTR_* namespace.
77cf65a0f6SChristoph Hellwig  *
78cf65a0f6SChristoph Hellwig  * Managed dma_alloc_attrs().  Memory allocated using this function will be
79cf65a0f6SChristoph Hellwig  * automatically released on driver detach.
80cf65a0f6SChristoph Hellwig  *
81cf65a0f6SChristoph Hellwig  * RETURNS:
82cf65a0f6SChristoph Hellwig  * Pointer to allocated memory on success, NULL on failure.
83cf65a0f6SChristoph Hellwig  */
84cf65a0f6SChristoph Hellwig void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
85cf65a0f6SChristoph Hellwig 		gfp_t gfp, unsigned long attrs)
86cf65a0f6SChristoph Hellwig {
87cf65a0f6SChristoph Hellwig 	struct dma_devres *dr;
88cf65a0f6SChristoph Hellwig 	void *vaddr;
89cf65a0f6SChristoph Hellwig 
90cf65a0f6SChristoph Hellwig 	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
91cf65a0f6SChristoph Hellwig 	if (!dr)
92cf65a0f6SChristoph Hellwig 		return NULL;
93cf65a0f6SChristoph Hellwig 
94cf65a0f6SChristoph Hellwig 	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
95cf65a0f6SChristoph Hellwig 	if (!vaddr) {
96cf65a0f6SChristoph Hellwig 		devres_free(dr);
97cf65a0f6SChristoph Hellwig 		return NULL;
98cf65a0f6SChristoph Hellwig 	}
99cf65a0f6SChristoph Hellwig 
100cf65a0f6SChristoph Hellwig 	dr->vaddr = vaddr;
101cf65a0f6SChristoph Hellwig 	dr->dma_handle = *dma_handle;
102cf65a0f6SChristoph Hellwig 	dr->size = size;
103cf65a0f6SChristoph Hellwig 	dr->attrs = attrs;
104cf65a0f6SChristoph Hellwig 
105cf65a0f6SChristoph Hellwig 	devres_add(dev, dr);
106cf65a0f6SChristoph Hellwig 
107cf65a0f6SChristoph Hellwig 	return vaddr;
108cf65a0f6SChristoph Hellwig }
109cf65a0f6SChristoph Hellwig EXPORT_SYMBOL(dmam_alloc_attrs);
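
/*
 * Minimal usage sketch (illustrative, not part of this file's interfaces):
 * a hypothetical probe path using the managed allocator above.  The
 * example_ name, the device pointer and the PAGE_SIZE allocation are
 * assumptions.  The buffer is released by devres on driver detach;
 * dmam_free_coherent() is only needed to release it earlier than that.
 */
static int example_managed_probe(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* coherent ring buffer, freed automatically when the driver detaches */
	ring = dmam_alloc_attrs(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL, 0);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into device registers ... */
	return 0;
}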
110cf65a0f6SChristoph Hellwig 
111d35834c6SChristoph Hellwig static bool dma_go_direct(struct device *dev, dma_addr_t mask,
112d35834c6SChristoph Hellwig 		const struct dma_map_ops *ops)
113d3fa60d7SChristoph Hellwig {
114d35834c6SChristoph Hellwig 	if (likely(!ops))
115d35834c6SChristoph Hellwig 		return true;
116d35834c6SChristoph Hellwig #ifdef CONFIG_DMA_OPS_BYPASS
117d35834c6SChristoph Hellwig 	if (dev->dma_ops_bypass)
118d35834c6SChristoph Hellwig 		return min_not_zero(mask, dev->bus_dma_limit) >=
119d35834c6SChristoph Hellwig 			    dma_direct_get_required_mask(dev);
120d35834c6SChristoph Hellwig #endif
121d35834c6SChristoph Hellwig 	return false;
122d35834c6SChristoph Hellwig }
123d35834c6SChristoph Hellwig 
124d35834c6SChristoph Hellwig 
125d35834c6SChristoph Hellwig /*
126d35834c6SChristoph Hellwig  * Check if the device uses a direct mapping for streaming DMA operations.
127d35834c6SChristoph Hellwig  * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
128d35834c6SChristoph Hellwig  * enough.
129d35834c6SChristoph Hellwig  */
130d35834c6SChristoph Hellwig static inline bool dma_alloc_direct(struct device *dev,
131d35834c6SChristoph Hellwig 		const struct dma_map_ops *ops)
132d35834c6SChristoph Hellwig {
133d35834c6SChristoph Hellwig 	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
134d35834c6SChristoph Hellwig }
135d35834c6SChristoph Hellwig 
136d35834c6SChristoph Hellwig static inline bool dma_map_direct(struct device *dev,
137d35834c6SChristoph Hellwig 		const struct dma_map_ops *ops)
138d35834c6SChristoph Hellwig {
139d35834c6SChristoph Hellwig 	return dma_go_direct(dev, *dev->dma_mask, ops);
140d3fa60d7SChristoph Hellwig }
141d3fa60d7SChristoph Hellwig 
142d3fa60d7SChristoph Hellwig dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
143d3fa60d7SChristoph Hellwig 		size_t offset, size_t size, enum dma_data_direction dir,
144d3fa60d7SChristoph Hellwig 		unsigned long attrs)
145d3fa60d7SChristoph Hellwig {
146d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
147d3fa60d7SChristoph Hellwig 	dma_addr_t addr;
148d3fa60d7SChristoph Hellwig 
149d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
150f959dcd6SThomas Tai 
151f959dcd6SThomas Tai 	if (WARN_ON_ONCE(!dev->dma_mask))
152f959dcd6SThomas Tai 		return DMA_MAPPING_ERROR;
153f959dcd6SThomas Tai 
1548d8d53cfSAlexey Kardashevskiy 	if (dma_map_direct(dev, ops) ||
1558d8d53cfSAlexey Kardashevskiy 	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
156d3fa60d7SChristoph Hellwig 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
157d3fa60d7SChristoph Hellwig 	else
158d3fa60d7SChristoph Hellwig 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
159c2bbf9d1SHamza Mahfooz 	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
160d3fa60d7SChristoph Hellwig 
161d3fa60d7SChristoph Hellwig 	return addr;
162d3fa60d7SChristoph Hellwig }
163d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_map_page_attrs);
164d3fa60d7SChristoph Hellwig 
165d3fa60d7SChristoph Hellwig void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
166d3fa60d7SChristoph Hellwig 		enum dma_data_direction dir, unsigned long attrs)
167d3fa60d7SChristoph Hellwig {
168d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
169d3fa60d7SChristoph Hellwig 
170d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
1718d8d53cfSAlexey Kardashevskiy 	if (dma_map_direct(dev, ops) ||
1728d8d53cfSAlexey Kardashevskiy 	    arch_dma_unmap_page_direct(dev, addr + size))
173d3fa60d7SChristoph Hellwig 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
174d3fa60d7SChristoph Hellwig 	else if (ops->unmap_page)
175d3fa60d7SChristoph Hellwig 		ops->unmap_page(dev, addr, size, dir, attrs);
176d3fa60d7SChristoph Hellwig 	debug_dma_unmap_page(dev, addr, size, dir);
177d3fa60d7SChristoph Hellwig }
178d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_unmap_page_attrs);
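
/*
 * Minimal usage sketch, assuming a hypothetical caller that streams one page
 * to the device.  The page source, length and DMA_TO_DEVICE direction are
 * assumptions; the essential points are the dma_mapping_error() check and
 * the symmetric unmap with the same size and direction.
 */
static int example_map_one_page(struct device *dev, struct page *page,
		size_t len)
{
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand addr to the hardware and wait for completion ... */

	dma_unmap_page_attrs(dev, addr, len, DMA_TO_DEVICE, 0);
	return 0;
}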
179d3fa60d7SChristoph Hellwig 
180fffe3cc8SLogan Gunthorpe static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
181fffe3cc8SLogan Gunthorpe 	 int nents, enum dma_data_direction dir, unsigned long attrs)
182d3fa60d7SChristoph Hellwig {
183d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
184d3fa60d7SChristoph Hellwig 	int ents;
185d3fa60d7SChristoph Hellwig 
186d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
187f959dcd6SThomas Tai 
188f959dcd6SThomas Tai 	if (WARN_ON_ONCE(!dev->dma_mask))
189f959dcd6SThomas Tai 		return 0;
190f959dcd6SThomas Tai 
1918d8d53cfSAlexey Kardashevskiy 	if (dma_map_direct(dev, ops) ||
1928d8d53cfSAlexey Kardashevskiy 	    arch_dma_map_sg_direct(dev, sg, nents))
193d3fa60d7SChristoph Hellwig 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
194d3fa60d7SChristoph Hellwig 	else
195d3fa60d7SChristoph Hellwig 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
196fffe3cc8SLogan Gunthorpe 
197fffe3cc8SLogan Gunthorpe 	if (ents > 0)
198c2bbf9d1SHamza Mahfooz 		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
199fffe3cc8SLogan Gunthorpe 	else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
200d03c5441SLogan Gunthorpe 			      ents != -EIO))
201fffe3cc8SLogan Gunthorpe 		return -EIO;
202d3fa60d7SChristoph Hellwig 
203d3fa60d7SChristoph Hellwig 	return ents;
204d3fa60d7SChristoph Hellwig }
205fffe3cc8SLogan Gunthorpe 
206fffe3cc8SLogan Gunthorpe /**
207fffe3cc8SLogan Gunthorpe  * dma_map_sg_attrs - Map the given buffer for DMA
208fffe3cc8SLogan Gunthorpe  * @dev:	The device for which to perform the DMA operation
209fffe3cc8SLogan Gunthorpe  * @sg:		The scatterlist describing the buffer
210a61cb601SChristoph Hellwig  * @nents:	Number of entries to map
211fffe3cc8SLogan Gunthorpe  * @dir:	DMA direction
212fffe3cc8SLogan Gunthorpe  * @attrs:	Optional DMA attributes for the map operation
213fffe3cc8SLogan Gunthorpe  *
214fffe3cc8SLogan Gunthorpe  * Maps a buffer described by a scatterlist passed in the sg argument with
215fffe3cc8SLogan Gunthorpe  * nents segments for the @dir DMA operation by the @dev device.
216fffe3cc8SLogan Gunthorpe  *
217fffe3cc8SLogan Gunthorpe  * Returns the number of mapped entries (which can be less than nents)
218fffe3cc8SLogan Gunthorpe  * on success. Zero is returned for any error.
219fffe3cc8SLogan Gunthorpe  *
220fffe3cc8SLogan Gunthorpe  * dma_unmap_sg_attrs() should be used to unmap the buffer with the
221fffe3cc8SLogan Gunthorpe  * original sg and original nents (not the value returned by this function).
222fffe3cc8SLogan Gunthorpe  */
2232a047e06SChristoph Hellwig unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
224fffe3cc8SLogan Gunthorpe 		    int nents, enum dma_data_direction dir, unsigned long attrs)
225fffe3cc8SLogan Gunthorpe {
226fffe3cc8SLogan Gunthorpe 	int ret;
227fffe3cc8SLogan Gunthorpe 
228fffe3cc8SLogan Gunthorpe 	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
229fffe3cc8SLogan Gunthorpe 	if (ret < 0)
230fffe3cc8SLogan Gunthorpe 		return 0;
231fffe3cc8SLogan Gunthorpe 	return ret;
232fffe3cc8SLogan Gunthorpe }
233d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_map_sg_attrs);
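
/*
 * Minimal usage sketch, assuming a scatterlist already built by the caller.
 * Note that the loop walks the returned (possibly smaller) entry count,
 * while the unmap uses the original nents, as required by the comment above.
 * The DMA_FROM_DEVICE direction is an assumption for illustration.
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		/* ... feed sg_dma_address(sg) / sg_dma_len(sg) to hardware ... */
	}

	dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
	return 0;
}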
234d3fa60d7SChristoph Hellwig 
235fffe3cc8SLogan Gunthorpe /**
236fffe3cc8SLogan Gunthorpe  * dma_map_sgtable - Map the given buffer for DMA
237fffe3cc8SLogan Gunthorpe  * @dev:	The device for which to perform the DMA operation
238fffe3cc8SLogan Gunthorpe  * @sgt:	The sg_table object describing the buffer
239fffe3cc8SLogan Gunthorpe  * @dir:	DMA direction
240fffe3cc8SLogan Gunthorpe  * @attrs:	Optional DMA attributes for the map operation
241fffe3cc8SLogan Gunthorpe  *
242fffe3cc8SLogan Gunthorpe  * Maps a buffer described by a scatterlist stored in the given sg_table
243fffe3cc8SLogan Gunthorpe  * object for the @dir DMA operation by the @dev device. On success,
244fffe3cc8SLogan Gunthorpe  * ownership of the buffer is transferred to the DMA domain.  One has to
245fffe3cc8SLogan Gunthorpe  * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
246fffe3cc8SLogan Gunthorpe  * ownership of the buffer back to the CPU domain before the CPU touches
247fffe3cc8SLogan Gunthorpe  * the buffer.
248fffe3cc8SLogan Gunthorpe  *
249fffe3cc8SLogan Gunthorpe  * Returns 0 on success or a negative error code on error. The following
250fffe3cc8SLogan Gunthorpe  * error codes are supported with the given meaning:
251fffe3cc8SLogan Gunthorpe  *
252011a9ce8SLogan Gunthorpe  *   -EINVAL	An invalid argument, unaligned access or other error
253fffe3cc8SLogan Gunthorpe  *		in usage. Will not succeed if retried.
254011a9ce8SLogan Gunthorpe  *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
255fffe3cc8SLogan Gunthorpe  *		complete the mapping. Should succeed if retried later.
256011a9ce8SLogan Gunthorpe  *   -EIO	Legacy error code with an unknown meaning, e.g. this is
257fffe3cc8SLogan Gunthorpe  *		returned if a lower level call returned DMA_MAPPING_ERROR.
258fffe3cc8SLogan Gunthorpe  */
259fffe3cc8SLogan Gunthorpe int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
260fffe3cc8SLogan Gunthorpe 		    enum dma_data_direction dir, unsigned long attrs)
261fffe3cc8SLogan Gunthorpe {
262fffe3cc8SLogan Gunthorpe 	int nents;
263fffe3cc8SLogan Gunthorpe 
264fffe3cc8SLogan Gunthorpe 	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
265fffe3cc8SLogan Gunthorpe 	if (nents < 0)
266fffe3cc8SLogan Gunthorpe 		return nents;
267fffe3cc8SLogan Gunthorpe 	sgt->nents = nents;
268fffe3cc8SLogan Gunthorpe 	return 0;
269fffe3cc8SLogan Gunthorpe }
270fffe3cc8SLogan Gunthorpe EXPORT_SYMBOL_GPL(dma_map_sgtable);
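
/*
 * Minimal usage sketch: the sg_table based interface above hides the
 * original-vs-mapped nents bookkeeping.  The table is assumed to have been
 * filled by the caller (e.g. via sg_alloc_table_from_pages());
 * for_each_sgtable_dma_sg() walks only the successfully mapped DMA entries.
 * The DMA_BIDIRECTIONAL direction is an assumption.
 */
static int example_map_sgtable(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret, i;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;	/* -EINVAL, -ENOMEM or -EIO as documented above */

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* ... program sg_dma_address(sg) / sg_dma_len(sg) ... */
	}

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}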
271fffe3cc8SLogan Gunthorpe 
272d3fa60d7SChristoph Hellwig void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
273d3fa60d7SChristoph Hellwig 				      int nents, enum dma_data_direction dir,
274d3fa60d7SChristoph Hellwig 				      unsigned long attrs)
275d3fa60d7SChristoph Hellwig {
276d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
277d3fa60d7SChristoph Hellwig 
278d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
279d3fa60d7SChristoph Hellwig 	debug_dma_unmap_sg(dev, sg, nents, dir);
2808d8d53cfSAlexey Kardashevskiy 	if (dma_map_direct(dev, ops) ||
2818d8d53cfSAlexey Kardashevskiy 	    arch_dma_unmap_sg_direct(dev, sg, nents))
282d3fa60d7SChristoph Hellwig 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
283d3fa60d7SChristoph Hellwig 	else if (ops->unmap_sg)
284d3fa60d7SChristoph Hellwig 		ops->unmap_sg(dev, sg, nents, dir, attrs);
285d3fa60d7SChristoph Hellwig }
286d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_unmap_sg_attrs);
287d3fa60d7SChristoph Hellwig 
288d3fa60d7SChristoph Hellwig dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
289d3fa60d7SChristoph Hellwig 		size_t size, enum dma_data_direction dir, unsigned long attrs)
290d3fa60d7SChristoph Hellwig {
291d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
292d3fa60d7SChristoph Hellwig 	dma_addr_t addr = DMA_MAPPING_ERROR;
293d3fa60d7SChristoph Hellwig 
294d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
295d3fa60d7SChristoph Hellwig 
296f959dcd6SThomas Tai 	if (WARN_ON_ONCE(!dev->dma_mask))
297f959dcd6SThomas Tai 		return DMA_MAPPING_ERROR;
298f959dcd6SThomas Tai 
299d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
300d3fa60d7SChristoph Hellwig 		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
301d3fa60d7SChristoph Hellwig 	else if (ops->map_resource)
302d3fa60d7SChristoph Hellwig 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
303d3fa60d7SChristoph Hellwig 
304c2bbf9d1SHamza Mahfooz 	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
305d3fa60d7SChristoph Hellwig 	return addr;
306d3fa60d7SChristoph Hellwig }
307d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_map_resource);
308d3fa60d7SChristoph Hellwig 
309d3fa60d7SChristoph Hellwig void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
310d3fa60d7SChristoph Hellwig 		enum dma_data_direction dir, unsigned long attrs)
311d3fa60d7SChristoph Hellwig {
312d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
313d3fa60d7SChristoph Hellwig 
314d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
315d35834c6SChristoph Hellwig 	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
316d3fa60d7SChristoph Hellwig 		ops->unmap_resource(dev, addr, size, dir, attrs);
317d3fa60d7SChristoph Hellwig 	debug_dma_unmap_resource(dev, addr, size, dir);
318d3fa60d7SChristoph Hellwig }
319d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_unmap_resource);
320d3fa60d7SChristoph Hellwig 
321d3fa60d7SChristoph Hellwig void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
322d3fa60d7SChristoph Hellwig 		enum dma_data_direction dir)
323d3fa60d7SChristoph Hellwig {
324d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
325d3fa60d7SChristoph Hellwig 
326d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
327d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
328d3fa60d7SChristoph Hellwig 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
329d3fa60d7SChristoph Hellwig 	else if (ops->sync_single_for_cpu)
330d3fa60d7SChristoph Hellwig 		ops->sync_single_for_cpu(dev, addr, size, dir);
331d3fa60d7SChristoph Hellwig 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
332d3fa60d7SChristoph Hellwig }
333d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_sync_single_for_cpu);
334d3fa60d7SChristoph Hellwig 
335d3fa60d7SChristoph Hellwig void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
336d3fa60d7SChristoph Hellwig 		size_t size, enum dma_data_direction dir)
337d3fa60d7SChristoph Hellwig {
338d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
339d3fa60d7SChristoph Hellwig 
340d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
341d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
342d3fa60d7SChristoph Hellwig 		dma_direct_sync_single_for_device(dev, addr, size, dir);
343d3fa60d7SChristoph Hellwig 	else if (ops->sync_single_for_device)
344d3fa60d7SChristoph Hellwig 		ops->sync_single_for_device(dev, addr, size, dir);
345d3fa60d7SChristoph Hellwig 	debug_dma_sync_single_for_device(dev, addr, size, dir);
346d3fa60d7SChristoph Hellwig }
347d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_sync_single_for_device);
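
/*
 * Minimal usage sketch: reusing a single streaming mapping across several
 * transfers with the sync helpers above.  The buffer, its size and the
 * DMA_FROM_DEVICE direction are assumptions; the point is that ownership
 * moves to the CPU before the data is read and back to the device before
 * the next transfer starts.
 */
static void example_sync_reuse(struct device *dev, dma_addr_t addr,
		void *cpu_buf, size_t size)
{
	/* device finished writing: hand the buffer back to the CPU */
	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);

	/* ... CPU consumes the data in cpu_buf here ... */

	/* give the same mapping back to the device for the next transfer */
	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
}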
348d3fa60d7SChristoph Hellwig 
349d3fa60d7SChristoph Hellwig void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
350d3fa60d7SChristoph Hellwig 		    int nelems, enum dma_data_direction dir)
351d3fa60d7SChristoph Hellwig {
352d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
353d3fa60d7SChristoph Hellwig 
354d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
355d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
356d3fa60d7SChristoph Hellwig 		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
357d3fa60d7SChristoph Hellwig 	else if (ops->sync_sg_for_cpu)
358d3fa60d7SChristoph Hellwig 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
359d3fa60d7SChristoph Hellwig 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
360d3fa60d7SChristoph Hellwig }
361d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_sync_sg_for_cpu);
362d3fa60d7SChristoph Hellwig 
363d3fa60d7SChristoph Hellwig void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
364d3fa60d7SChristoph Hellwig 		       int nelems, enum dma_data_direction dir)
365d3fa60d7SChristoph Hellwig {
366d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
367d3fa60d7SChristoph Hellwig 
368d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
369d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
370d3fa60d7SChristoph Hellwig 		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
371d3fa60d7SChristoph Hellwig 	else if (ops->sync_sg_for_device)
372d3fa60d7SChristoph Hellwig 		ops->sync_sg_for_device(dev, sg, nelems, dir);
373d3fa60d7SChristoph Hellwig 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
374d3fa60d7SChristoph Hellwig }
375d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_sync_sg_for_device);
376d3fa60d7SChristoph Hellwig 
377cf65a0f6SChristoph Hellwig /*
37814451467SChristoph Hellwig  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
37914451467SChristoph Hellwig  * that the intention is to allow exporting memory allocated via the
38014451467SChristoph Hellwig  * coherent DMA APIs through the dma_buf API, which only accepts a
38114451467SChristoph Hellwig  * scatterlist.  This presents a couple of problems:
38214451467SChristoph Hellwig  * 1. Not all memory allocated via the coherent DMA APIs is backed by
38314451467SChristoph Hellwig  *    a struct page
38414451467SChristoph Hellwig  * 2. Passing coherent DMA memory into the streaming APIs is not allowed
38514451467SChristoph Hellwig  *    as we will try to flush the memory through a different alias to that
38614451467SChristoph Hellwig  *    actually being used (and the flushes are redundant).
38714451467SChristoph Hellwig  */
3887249c1a5SChristoph Hellwig int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
3897249c1a5SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
3907249c1a5SChristoph Hellwig 		unsigned long attrs)
3917249c1a5SChristoph Hellwig {
3927249c1a5SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
393356da6d0SChristoph Hellwig 
394d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
39534dc0ea6SChristoph Hellwig 		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
396f9f3232aSChristoph Hellwig 				size, attrs);
397f9f3232aSChristoph Hellwig 	if (!ops->get_sgtable)
398f9f3232aSChristoph Hellwig 		return -ENXIO;
399f9f3232aSChristoph Hellwig 	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
4007249c1a5SChristoph Hellwig }
4017249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_get_sgtable_attrs);
402cf65a0f6SChristoph Hellwig 
40333dcb37cSChristoph Hellwig #ifdef CONFIG_MMU
40433dcb37cSChristoph Hellwig /*
40533dcb37cSChristoph Hellwig  * Return the page attributes used for mapping dma_alloc_* memory, either in
40633dcb37cSChristoph Hellwig  * kernel space if remapping is needed, or to userspace through dma_mmap_*.
40733dcb37cSChristoph Hellwig  */
40833dcb37cSChristoph Hellwig pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
40933dcb37cSChristoph Hellwig {
410efa70f2fSChristoph Hellwig 	if (dev_is_dma_coherent(dev))
41133dcb37cSChristoph Hellwig 		return prot;
412419e2f18SChristoph Hellwig #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
413419e2f18SChristoph Hellwig 	if (attrs & DMA_ATTR_WRITE_COMBINE)
414419e2f18SChristoph Hellwig 		return pgprot_writecombine(prot);
415419e2f18SChristoph Hellwig #endif
416419e2f18SChristoph Hellwig 	return pgprot_dmacoherent(prot);
41733dcb37cSChristoph Hellwig }
41833dcb37cSChristoph Hellwig #endif /* CONFIG_MMU */
41933dcb37cSChristoph Hellwig 
4207249c1a5SChristoph Hellwig /**
421e29ccc18SChristoph Hellwig  * dma_can_mmap - check if a given device supports dma_mmap_*
422e29ccc18SChristoph Hellwig  * @dev: device to check
423e29ccc18SChristoph Hellwig  *
424e29ccc18SChristoph Hellwig  * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
425e29ccc18SChristoph Hellwig  * map DMA allocations to userspace.
426e29ccc18SChristoph Hellwig  */
427e29ccc18SChristoph Hellwig bool dma_can_mmap(struct device *dev)
428e29ccc18SChristoph Hellwig {
429e29ccc18SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
430e29ccc18SChristoph Hellwig 
431d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
43234dc0ea6SChristoph Hellwig 		return dma_direct_can_mmap(dev);
433e29ccc18SChristoph Hellwig 	return ops->mmap != NULL;
434e29ccc18SChristoph Hellwig }
435e29ccc18SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_can_mmap);
436e29ccc18SChristoph Hellwig 
437e29ccc18SChristoph Hellwig /**
4387249c1a5SChristoph Hellwig  * dma_mmap_attrs - map a coherent DMA allocation into user space
4397249c1a5SChristoph Hellwig  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
4407249c1a5SChristoph Hellwig  * @vma: vm_area_struct describing requested user mapping
4417249c1a5SChristoph Hellwig  * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
4427249c1a5SChristoph Hellwig  * @dma_addr: device-view address returned from dma_alloc_attrs
4437249c1a5SChristoph Hellwig  * @size: size of memory originally requested in dma_alloc_attrs
4447249c1a5SChristoph Hellwig  * @attrs: attributes of mapping properties requested in dma_alloc_attrs
4457249c1a5SChristoph Hellwig  *
4467249c1a5SChristoph Hellwig  * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
4477249c1a5SChristoph Hellwig  * space.  The coherent DMA buffer must not be freed by the driver until the
4487249c1a5SChristoph Hellwig  * user space mapping has been released.
4497249c1a5SChristoph Hellwig  */
4507249c1a5SChristoph Hellwig int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
4517249c1a5SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
4527249c1a5SChristoph Hellwig 		unsigned long attrs)
4537249c1a5SChristoph Hellwig {
4547249c1a5SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
455356da6d0SChristoph Hellwig 
456d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
45734dc0ea6SChristoph Hellwig 		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
458f9f3232aSChristoph Hellwig 				attrs);
459f9f3232aSChristoph Hellwig 	if (!ops->mmap)
460f9f3232aSChristoph Hellwig 		return -ENXIO;
4617249c1a5SChristoph Hellwig 	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
4627249c1a5SChristoph Hellwig }
4637249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_mmap_attrs);
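
/*
 * Minimal usage sketch: exposing a coherent buffer to user space from a
 * hypothetical char device .mmap handler.  The buffer is assumed to have
 * been allocated with dma_alloc_attrs() and stashed by the driver; the
 * dma_can_mmap() check uses the helper documented above.
 */
static int example_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	if (!dma_can_mmap(dev))
		return -ENXIO;
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, 0);
}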
46405887cb6SChristoph Hellwig 
46505887cb6SChristoph Hellwig u64 dma_get_required_mask(struct device *dev)
46605887cb6SChristoph Hellwig {
46705887cb6SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
46805887cb6SChristoph Hellwig 
469d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
470356da6d0SChristoph Hellwig 		return dma_direct_get_required_mask(dev);
47105887cb6SChristoph Hellwig 	if (ops->get_required_mask)
47205887cb6SChristoph Hellwig 		return ops->get_required_mask(dev);
473249baa54SChristoph Hellwig 
474249baa54SChristoph Hellwig 	/*
475249baa54SChristoph Hellwig 	 * We require every DMA ops implementation to at least support a 32-bit
476249baa54SChristoph Hellwig 	 * DMA mask (and use bounce buffering if that isn't supported in
477249baa54SChristoph Hellwig 	 * hardware).  As the direct mapping code has its own routine to
478249baa54SChristoph Hellwig 	 * actually report an optimal mask, we default to 32-bit here as that
479249baa54SChristoph Hellwig 	 * is the right thing for most IOMMUs, and at least not actively
480249baa54SChristoph Hellwig 	 * harmful in general.
481249baa54SChristoph Hellwig 	 */
482249baa54SChristoph Hellwig 	return DMA_BIT_MASK(32);
48305887cb6SChristoph Hellwig }
48405887cb6SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_get_required_mask);
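
/*
 * Minimal usage sketch, assuming a hypothetical driver that only enables
 * its larger 64-bit descriptor format when the platform actually has memory
 * above 4 GiB.  The decision logic is illustrative, not a recommendation.
 */
static bool example_wants_64bit_descriptors(struct device *dev)
{
	return dma_get_required_mask(dev) > DMA_BIT_MASK(32);
}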
48505887cb6SChristoph Hellwig 
4867249c1a5SChristoph Hellwig void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
4877249c1a5SChristoph Hellwig 		gfp_t flag, unsigned long attrs)
4887249c1a5SChristoph Hellwig {
4897249c1a5SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
4907249c1a5SChristoph Hellwig 	void *cpu_addr;
4917249c1a5SChristoph Hellwig 
492148a97d5SDan Carpenter 	WARN_ON_ONCE(!dev->coherent_dma_mask);
4937249c1a5SChristoph Hellwig 
4947249c1a5SChristoph Hellwig 	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
4957249c1a5SChristoph Hellwig 		return cpu_addr;
4967249c1a5SChristoph Hellwig 
4977249c1a5SChristoph Hellwig 	/* let the implementation decide on the zone to allocate from: */
4987249c1a5SChristoph Hellwig 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
4997249c1a5SChristoph Hellwig 
500d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
501356da6d0SChristoph Hellwig 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
502356da6d0SChristoph Hellwig 	else if (ops->alloc)
503356da6d0SChristoph Hellwig 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
504356da6d0SChristoph Hellwig 	else
5057249c1a5SChristoph Hellwig 		return NULL;
5067249c1a5SChristoph Hellwig 
507c2bbf9d1SHamza Mahfooz 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
5087249c1a5SChristoph Hellwig 	return cpu_addr;
5097249c1a5SChristoph Hellwig }
5107249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_alloc_attrs);
5117249c1a5SChristoph Hellwig 
5127249c1a5SChristoph Hellwig void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
5137249c1a5SChristoph Hellwig 		dma_addr_t dma_handle, unsigned long attrs)
5147249c1a5SChristoph Hellwig {
5157249c1a5SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
5167249c1a5SChristoph Hellwig 
5177249c1a5SChristoph Hellwig 	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
5187249c1a5SChristoph Hellwig 		return;
5197249c1a5SChristoph Hellwig 	/*
5207249c1a5SChristoph Hellwig 	 * On non-coherent platforms which implement DMA-coherent buffers via
5217249c1a5SChristoph Hellwig 	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
5227249c1a5SChristoph Hellwig 	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
5237249c1a5SChristoph Hellwig 	 * sleep on some machines, and b) an indication that the driver is
5247249c1a5SChristoph Hellwig 	 * probably misusing the coherent API anyway.
5257249c1a5SChristoph Hellwig 	 */
5267249c1a5SChristoph Hellwig 	WARN_ON(irqs_disabled());
5277249c1a5SChristoph Hellwig 
528356da6d0SChristoph Hellwig 	if (!cpu_addr)
5297249c1a5SChristoph Hellwig 		return;
5307249c1a5SChristoph Hellwig 
5317249c1a5SChristoph Hellwig 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
532d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
533356da6d0SChristoph Hellwig 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
534356da6d0SChristoph Hellwig 	else if (ops->free)
5357249c1a5SChristoph Hellwig 		ops->free(dev, size, cpu_addr, dma_handle, attrs);
5367249c1a5SChristoph Hellwig }
5377249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_free_attrs);
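
/*
 * Minimal usage sketch: a coherent descriptor ring allocated with
 * dma_alloc_attrs() and released with dma_free_attrs().  The PAGE_SIZE
 * length and the absence of special attributes are assumptions; most
 * drivers use the dma_alloc_coherent()/dma_free_coherent() wrappers,
 * which pass attrs = 0.
 */
static int example_coherent_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_attrs(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL, 0);
	if (!ring)
		return -ENOMEM;

	/* ring is CPU- and device-visible for the lifetime of the mapping */

	dma_free_attrs(dev, PAGE_SIZE, ring, ring_dma, 0);
	return 0;
}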
5387249c1a5SChristoph Hellwig 
539198c50e2SChristoph Hellwig static struct page *__dma_alloc_pages(struct device *dev, size_t size,
540efa70f2fSChristoph Hellwig 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
541efa70f2fSChristoph Hellwig {
542efa70f2fSChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
543efa70f2fSChristoph Hellwig 
544efa70f2fSChristoph Hellwig 	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
545efa70f2fSChristoph Hellwig 		return NULL;
546efa70f2fSChristoph Hellwig 	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
547efa70f2fSChristoph Hellwig 		return NULL;
548efa70f2fSChristoph Hellwig 
549efa70f2fSChristoph Hellwig 	size = PAGE_ALIGN(size);
550efa70f2fSChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
551198c50e2SChristoph Hellwig 		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
552198c50e2SChristoph Hellwig 	if (!ops->alloc_pages)
553efa70f2fSChristoph Hellwig 		return NULL;
554198c50e2SChristoph Hellwig 	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
555198c50e2SChristoph Hellwig }
556efa70f2fSChristoph Hellwig 
557198c50e2SChristoph Hellwig struct page *dma_alloc_pages(struct device *dev, size_t size,
558198c50e2SChristoph Hellwig 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
559198c50e2SChristoph Hellwig {
560198c50e2SChristoph Hellwig 	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
561198c50e2SChristoph Hellwig 
562198c50e2SChristoph Hellwig 	if (page)
563c2bbf9d1SHamza Mahfooz 		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
564efa70f2fSChristoph Hellwig 	return page;
565efa70f2fSChristoph Hellwig }
566efa70f2fSChristoph Hellwig EXPORT_SYMBOL_GPL(dma_alloc_pages);
567efa70f2fSChristoph Hellwig 
568198c50e2SChristoph Hellwig static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
569efa70f2fSChristoph Hellwig 		dma_addr_t dma_handle, enum dma_data_direction dir)
570efa70f2fSChristoph Hellwig {
571efa70f2fSChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
572efa70f2fSChristoph Hellwig 
573efa70f2fSChristoph Hellwig 	size = PAGE_ALIGN(size);
574efa70f2fSChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
575efa70f2fSChristoph Hellwig 		dma_direct_free_pages(dev, size, page, dma_handle, dir);
576efa70f2fSChristoph Hellwig 	else if (ops->free_pages)
577efa70f2fSChristoph Hellwig 		ops->free_pages(dev, size, page, dma_handle, dir);
578efa70f2fSChristoph Hellwig }
579198c50e2SChristoph Hellwig 
580198c50e2SChristoph Hellwig void dma_free_pages(struct device *dev, size_t size, struct page *page,
581198c50e2SChristoph Hellwig 		dma_addr_t dma_handle, enum dma_data_direction dir)
582198c50e2SChristoph Hellwig {
583198c50e2SChristoph Hellwig 	debug_dma_unmap_page(dev, dma_handle, size, dir);
584198c50e2SChristoph Hellwig 	__dma_free_pages(dev, size, page, dma_handle, dir);
585198c50e2SChristoph Hellwig }
586efa70f2fSChristoph Hellwig EXPORT_SYMBOL_GPL(dma_free_pages);
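
/*
 * Minimal usage sketch: dma_alloc_pages() returns a struct page backed,
 * non-coherent allocation already mapped for @dir.  The size and the
 * DMA_TO_DEVICE direction are assumptions; because __GFP_HIGHMEM is
 * rejected above, page_address() is safe here, and the CPU writes must be
 * synced to the device before the hardware uses the handle.
 */
static int example_alloc_pages(struct device *dev)
{
	struct page *page;
	dma_addr_t dma;
	void *vaddr;

	page = dma_alloc_pages(dev, PAGE_SIZE, &dma, DMA_TO_DEVICE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	vaddr = page_address(page);

	/* ... fill vaddr, then sync before handing dma to the device ... */
	dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);

	dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_TO_DEVICE);
	return 0;
}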
587efa70f2fSChristoph Hellwig 
588eedb0b12SChristoph Hellwig int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
589eedb0b12SChristoph Hellwig 		size_t size, struct page *page)
590eedb0b12SChristoph Hellwig {
591eedb0b12SChristoph Hellwig 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
592eedb0b12SChristoph Hellwig 
593eedb0b12SChristoph Hellwig 	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
594eedb0b12SChristoph Hellwig 		return -ENXIO;
595eedb0b12SChristoph Hellwig 	return remap_pfn_range(vma, vma->vm_start,
596eedb0b12SChristoph Hellwig 			       page_to_pfn(page) + vma->vm_pgoff,
597eedb0b12SChristoph Hellwig 			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
598eedb0b12SChristoph Hellwig }
599eedb0b12SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_mmap_pages);
600eedb0b12SChristoph Hellwig 
6017d5b5738SChristoph Hellwig static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
6027d5b5738SChristoph Hellwig 		enum dma_data_direction dir, gfp_t gfp)
6037d5b5738SChristoph Hellwig {
6047d5b5738SChristoph Hellwig 	struct sg_table *sgt;
6057d5b5738SChristoph Hellwig 	struct page *page;
6067d5b5738SChristoph Hellwig 
6077d5b5738SChristoph Hellwig 	sgt = kmalloc(sizeof(*sgt), gfp);
6087d5b5738SChristoph Hellwig 	if (!sgt)
6097d5b5738SChristoph Hellwig 		return NULL;
6107d5b5738SChristoph Hellwig 	if (sg_alloc_table(sgt, 1, gfp))
6117d5b5738SChristoph Hellwig 		goto out_free_sgt;
6127d5b5738SChristoph Hellwig 	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
6137d5b5738SChristoph Hellwig 	if (!page)
6147d5b5738SChristoph Hellwig 		goto out_free_table;
6157d5b5738SChristoph Hellwig 	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
6167d5b5738SChristoph Hellwig 	sg_dma_len(sgt->sgl) = sgt->sgl->length;
6177d5b5738SChristoph Hellwig 	return sgt;
6187d5b5738SChristoph Hellwig out_free_table:
6197d5b5738SChristoph Hellwig 	sg_free_table(sgt);
6207d5b5738SChristoph Hellwig out_free_sgt:
6217d5b5738SChristoph Hellwig 	kfree(sgt);
6227d5b5738SChristoph Hellwig 	return NULL;
6237d5b5738SChristoph Hellwig }
6247d5b5738SChristoph Hellwig 
6257d5b5738SChristoph Hellwig struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
6267d5b5738SChristoph Hellwig 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
6277d5b5738SChristoph Hellwig {
6287d5b5738SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
6297d5b5738SChristoph Hellwig 	struct sg_table *sgt;
6307d5b5738SChristoph Hellwig 
6317d5b5738SChristoph Hellwig 	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
6327d5b5738SChristoph Hellwig 		return NULL;
6337d5b5738SChristoph Hellwig 
6347d5b5738SChristoph Hellwig 	if (ops && ops->alloc_noncontiguous)
6357d5b5738SChristoph Hellwig 		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
6367d5b5738SChristoph Hellwig 	else
6377d5b5738SChristoph Hellwig 		sgt = alloc_single_sgt(dev, size, dir, gfp);
6387d5b5738SChristoph Hellwig 
6397d5b5738SChristoph Hellwig 	if (sgt) {
6407d5b5738SChristoph Hellwig 		sgt->nents = 1;
641c2bbf9d1SHamza Mahfooz 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
6427d5b5738SChristoph Hellwig 	}
6437d5b5738SChristoph Hellwig 	return sgt;
6447d5b5738SChristoph Hellwig }
6457d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
6467d5b5738SChristoph Hellwig 
6477d5b5738SChristoph Hellwig static void free_single_sgt(struct device *dev, size_t size,
6487d5b5738SChristoph Hellwig 		struct sg_table *sgt, enum dma_data_direction dir)
6497d5b5738SChristoph Hellwig {
6507d5b5738SChristoph Hellwig 	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
6517d5b5738SChristoph Hellwig 			 dir);
6527d5b5738SChristoph Hellwig 	sg_free_table(sgt);
6537d5b5738SChristoph Hellwig 	kfree(sgt);
6547d5b5738SChristoph Hellwig }
6557d5b5738SChristoph Hellwig 
6567d5b5738SChristoph Hellwig void dma_free_noncontiguous(struct device *dev, size_t size,
6577d5b5738SChristoph Hellwig 		struct sg_table *sgt, enum dma_data_direction dir)
6587d5b5738SChristoph Hellwig {
6597d5b5738SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
6607d5b5738SChristoph Hellwig 
6617d5b5738SChristoph Hellwig 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
6627d5b5738SChristoph Hellwig 	if (ops && ops->free_noncontiguous)
6637d5b5738SChristoph Hellwig 		ops->free_noncontiguous(dev, size, sgt, dir);
6647d5b5738SChristoph Hellwig 	else
6657d5b5738SChristoph Hellwig 		free_single_sgt(dev, size, sgt, dir);
6667d5b5738SChristoph Hellwig }
6677d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
6687d5b5738SChristoph Hellwig 
6697d5b5738SChristoph Hellwig void *dma_vmap_noncontiguous(struct device *dev, size_t size,
6707d5b5738SChristoph Hellwig 		struct sg_table *sgt)
6717d5b5738SChristoph Hellwig {
6727d5b5738SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
6737d5b5738SChristoph Hellwig 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
6747d5b5738SChristoph Hellwig 
6757d5b5738SChristoph Hellwig 	if (ops && ops->alloc_noncontiguous)
6767d5b5738SChristoph Hellwig 		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
6777d5b5738SChristoph Hellwig 	return page_address(sg_page(sgt->sgl));
6787d5b5738SChristoph Hellwig }
6797d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
6807d5b5738SChristoph Hellwig 
6817d5b5738SChristoph Hellwig void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
6827d5b5738SChristoph Hellwig {
6837d5b5738SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
6847d5b5738SChristoph Hellwig 
6857d5b5738SChristoph Hellwig 	if (ops && ops->alloc_noncontiguous)
6867d5b5738SChristoph Hellwig 		vunmap(vaddr);
6877d5b5738SChristoph Hellwig }
6887d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
6897d5b5738SChristoph Hellwig 
6907d5b5738SChristoph Hellwig int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
6917d5b5738SChristoph Hellwig 		size_t size, struct sg_table *sgt)
6927d5b5738SChristoph Hellwig {
6937d5b5738SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
6947d5b5738SChristoph Hellwig 
6957d5b5738SChristoph Hellwig 	if (ops && ops->alloc_noncontiguous) {
6967d5b5738SChristoph Hellwig 		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
6977d5b5738SChristoph Hellwig 
6987d5b5738SChristoph Hellwig 		if (vma->vm_pgoff >= count ||
6997d5b5738SChristoph Hellwig 		    vma_pages(vma) > count - vma->vm_pgoff)
7007d5b5738SChristoph Hellwig 			return -ENXIO;
7017d5b5738SChristoph Hellwig 		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
7027d5b5738SChristoph Hellwig 	}
7037d5b5738SChristoph Hellwig 	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
7047d5b5738SChristoph Hellwig }
7057d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
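
/*
 * Minimal usage sketch of the noncontiguous API above: the returned sg_table
 * is already mapped for @dir, can be vmap()ed for CPU access and mmap()ed to
 * user space.  The size and DMA_BIDIRECTIONAL direction are assumptions, and
 * the sync call is only shown for the CPU-writes-then-device-reads case.
 */
static int example_noncontiguous(struct device *dev, size_t size)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	/* ... CPU access through vaddr ... */
	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
	/* ... device access through the DMA entries in sgt ... */

	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
	return 0;
}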
7067d5b5738SChristoph Hellwig 
7077249c1a5SChristoph Hellwig int dma_supported(struct device *dev, u64 mask)
7087249c1a5SChristoph Hellwig {
7097249c1a5SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
7107249c1a5SChristoph Hellwig 
711d35834c6SChristoph Hellwig 	/*
712d35834c6SChristoph Hellwig 	 * ->dma_supported sets the bypass flag, so we must always call
713d35834c6SChristoph Hellwig 	 * into the method here unless the device is truly direct mapped.
714d35834c6SChristoph Hellwig 	 */
715d35834c6SChristoph Hellwig 	if (!ops)
716356da6d0SChristoph Hellwig 		return dma_direct_supported(dev, mask);
7178b1cce9fSThierry Reding 	if (!ops->dma_supported)
7187249c1a5SChristoph Hellwig 		return 1;
7197249c1a5SChristoph Hellwig 	return ops->dma_supported(dev, mask);
7207249c1a5SChristoph Hellwig }
7217249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_supported);
7227249c1a5SChristoph Hellwig 
72311ddce15SChristoph Hellwig #ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
72411ddce15SChristoph Hellwig void arch_dma_set_mask(struct device *dev, u64 mask);
72511ddce15SChristoph Hellwig #else
72611ddce15SChristoph Hellwig #define arch_dma_set_mask(dev, mask)	do { } while (0)
72711ddce15SChristoph Hellwig #endif
72811ddce15SChristoph Hellwig 
7297249c1a5SChristoph Hellwig int dma_set_mask(struct device *dev, u64 mask)
7307249c1a5SChristoph Hellwig {
7314a54d16fSChristoph Hellwig 	/*
7324a54d16fSChristoph Hellwig 	 * Truncate the mask to the actually supported dma_addr_t width to
7334a54d16fSChristoph Hellwig 	 * avoid generating unsupportable addresses.
7344a54d16fSChristoph Hellwig 	 */
7354a54d16fSChristoph Hellwig 	mask = (dma_addr_t)mask;
7364a54d16fSChristoph Hellwig 
7377249c1a5SChristoph Hellwig 	if (!dev->dma_mask || !dma_supported(dev, mask))
7387249c1a5SChristoph Hellwig 		return -EIO;
7397249c1a5SChristoph Hellwig 
74011ddce15SChristoph Hellwig 	arch_dma_set_mask(dev, mask);
7417249c1a5SChristoph Hellwig 	*dev->dma_mask = mask;
7427249c1a5SChristoph Hellwig 	return 0;
7437249c1a5SChristoph Hellwig }
7447249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_set_mask);
7457249c1a5SChristoph Hellwig 
7467249c1a5SChristoph Hellwig int dma_set_coherent_mask(struct device *dev, u64 mask)
7477249c1a5SChristoph Hellwig {
7484a54d16fSChristoph Hellwig 	/*
7494a54d16fSChristoph Hellwig 	 * Truncate the mask to the actually supported dma_addr_t width to
7504a54d16fSChristoph Hellwig 	 * avoid generating unsupportable addresses.
7514a54d16fSChristoph Hellwig 	 */
7524a54d16fSChristoph Hellwig 	mask = (dma_addr_t)mask;
7534a54d16fSChristoph Hellwig 
7547249c1a5SChristoph Hellwig 	if (!dma_supported(dev, mask))
7557249c1a5SChristoph Hellwig 		return -EIO;
7567249c1a5SChristoph Hellwig 
7577249c1a5SChristoph Hellwig 	dev->coherent_dma_mask = mask;
7587249c1a5SChristoph Hellwig 	return 0;
7597249c1a5SChristoph Hellwig }
7607249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_set_coherent_mask);
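
/*
 * Minimal usage sketch: probe-time mask negotiation.  Most drivers use the
 * dma_set_mask_and_coherent() wrapper around the two setters above; the
 * 64-bit-then-32-bit fallback shown here is a common driver pattern, not a
 * requirement of this file.
 */
static int example_set_masks(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return 0;
}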
7618ddbe594SChristoph Hellwig 
762133d624bSJoerg Roedel size_t dma_max_mapping_size(struct device *dev)
763133d624bSJoerg Roedel {
764133d624bSJoerg Roedel 	const struct dma_map_ops *ops = get_dma_ops(dev);
765133d624bSJoerg Roedel 	size_t size = SIZE_MAX;
766133d624bSJoerg Roedel 
767d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
768133d624bSJoerg Roedel 		size = dma_direct_max_mapping_size(dev);
769133d624bSJoerg Roedel 	else if (ops && ops->max_mapping_size)
770133d624bSJoerg Roedel 		size = ops->max_mapping_size(dev);
771133d624bSJoerg Roedel 
772133d624bSJoerg Roedel 	return size;
773133d624bSJoerg Roedel }
774133d624bSJoerg Roedel EXPORT_SYMBOL_GPL(dma_max_mapping_size);
7756ba99411SYoshihiro Shimoda 
776*a229cc14SJohn Garry size_t dma_opt_mapping_size(struct device *dev)
777*a229cc14SJohn Garry {
778*a229cc14SJohn Garry 	const struct dma_map_ops *ops = get_dma_ops(dev);
779*a229cc14SJohn Garry 	size_t size = SIZE_MAX;
780*a229cc14SJohn Garry 
781*a229cc14SJohn Garry 	if (ops && ops->opt_mapping_size)
782*a229cc14SJohn Garry 		size = ops->opt_mapping_size();
783*a229cc14SJohn Garry 
784*a229cc14SJohn Garry 	return min(dma_max_mapping_size(dev), size);
785*a229cc14SJohn Garry }
786*a229cc14SJohn Garry EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
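
/*
 * Minimal usage sketch: capping a per-request transfer size.
 * dma_max_mapping_size() is the hard limit (e.g. with swiotlb bouncing),
 * while dma_opt_mapping_size() is the smaller value beyond which mappings
 * may become noticeably slower; it is already clamped to the hard limit.
 * The caller and the "requested" parameter are assumptions.
 */
static size_t example_transfer_cap(struct device *dev, size_t requested)
{
	return min(requested, dma_opt_mapping_size(dev));
}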
787*a229cc14SJohn Garry 
7883aa91625SChristoph Hellwig bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
7893aa91625SChristoph Hellwig {
7903aa91625SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
7913aa91625SChristoph Hellwig 
792d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
7933aa91625SChristoph Hellwig 		return dma_direct_need_sync(dev, dma_addr);
7943aa91625SChristoph Hellwig 	return ops->sync_single_for_cpu || ops->sync_single_for_device;
7953aa91625SChristoph Hellwig }
7963aa91625SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_need_sync);
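
/*
 * Minimal usage sketch, assuming a page_pool style fast path that records
 * whether sync calls are no-ops for this device and address and skips them
 * when recycling a receive buffer.  The recycling logic itself is assumed
 * and not shown.
 */
static void example_recycle_sync(struct device *dev, dma_addr_t addr,
		size_t size)
{
	if (dma_need_sync(dev, addr))
		dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
}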
7973aa91625SChristoph Hellwig 
7986ba99411SYoshihiro Shimoda unsigned long dma_get_merge_boundary(struct device *dev)
7996ba99411SYoshihiro Shimoda {
8006ba99411SYoshihiro Shimoda 	const struct dma_map_ops *ops = get_dma_ops(dev);
8016ba99411SYoshihiro Shimoda 
8026ba99411SYoshihiro Shimoda 	if (!ops || !ops->get_merge_boundary)
8036ba99411SYoshihiro Shimoda 		return 0;	/* can't merge */
8046ba99411SYoshihiro Shimoda 
8056ba99411SYoshihiro Shimoda 	return ops->get_merge_boundary(dev);
8066ba99411SYoshihiro Shimoda }
8076ba99411SYoshihiro Shimoda EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
808