// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
	dma_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
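
/*
 * Usage sketch (illustrative only, not from an in-tree driver): a probe
 * routine that allocates a descriptor ring and relies on devres to release
 * it when the driver is unbound, so no explicit free path is needed.  The
 * "foo" structure and field names below are assumptions made up for this
 * example.
 *
 *	struct foo_priv {
 *		void *ring;
 *		dma_addr_t ring_dma;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		priv->ring = dmam_alloc_attrs(&pdev->dev, SZ_4K,
 *					      &priv->ring_dma, GFP_KERNEL, 0);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */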

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	kmsan_handle_dma(page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);
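
/*
 * Usage sketch (illustrative only): a streaming mapping of one page for a
 * memory-to-device transfer.  Every mapping attempt must be checked with
 * dma_mapping_error() before the address is handed to hardware, and the
 * buffer must be unmapped with the same size and direction afterwards.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... program the device with "dma" and wait for completion ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */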

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		return -EIO;
	}

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sg: The scatterlist describing the buffer
 * @nents: Number of entries to map
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer again.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning, e.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO	The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
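
/*
 * Usage sketch (illustrative only): mapping an sg_table for a
 * device-to-memory transfer and walking the resulting DMA segments.
 * program_hw_segment() is a made-up placeholder for whatever the driver
 * does with each mapped segment; it is not a real kernel interface.
 *
 *	struct scatterlist *sg;
 *	int ret, i;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		program_hw_segment(sg_dma_address(sg), sg_dma_len(sg));
 *	// ... wait for the transfer to finish ...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */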

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatter-gather table.  This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed
 *     as we will try to flush the memory through a different alias to that
 *     actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
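
/*
 * Usage sketch (illustrative only): exposing a coherent buffer to user space
 * from a driver's ->mmap() file operation.  "foo_priv" and its fields are
 * made-up names; the buffer is assumed to have come from dma_alloc_coherent().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		if (!dma_can_mmap(priv->dev))
 *			return -ENXIO;
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_addr, priv->size);
 *	}
 */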

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware). As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense (and can't even be
	 * supported at all by various backends).
	 */
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);
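
/*
 * Usage sketch (illustrative only): pages allocated here are accessed by the
 * CPU through page_address() and by the device through *dma_handle.  On
 * non-coherent platforms the caller owns the cache maintenance and must
 * bracket device accesses with the dma_sync_single_*() helpers.  "data" and
 * "len" stand in for whatever the caller wants to transfer.
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	buf = page_address(page);
 *	memcpy(buf, data, len);
 *	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
 *	// ... kick off the transfer and wait for it to complete ...
 *	dma_free_pages(dev, SZ_64K, page, dma, DMA_TO_DEVICE);
 */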

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
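
/*
 * Usage sketch (illustrative only): allocating a noncontiguous buffer,
 * giving the CPU a linear view of it via dma_vmap_noncontiguous(), and
 * tearing everything down again in reverse order.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, SZ_1M, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, SZ_1M, sgt);
 *	// ... use vaddr for CPU access, sgt->sgl for programming the device ...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, SZ_1M, sgt, DMA_BIDIRECTIONAL);
 */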

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);

static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* if ops is not set, dma direct will be used which supports P2PDMA */
	if (!ops)
		return true;

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
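
/*
 * Usage sketch (illustrative only): typical probe-time mask negotiation.
 * Most drivers use the dma_set_mask_and_coherent() wrapper to set both the
 * streaming and the coherent mask, and fall back to 32 bits if the wider
 * mask is rejected.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */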

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);