xref: /openbmc/linux/kernel/dma/mapping.c (revision d35834c64820c7ef397f8a244061d4450720540e)
// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
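
/*
 * Illustrative usage sketch (not part of this file): a driver that wants a
 * coherent buffer tied to the device lifetime can call the managed API from
 * its probe routine.  The "foo" names and the buffer size are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t desc_dma;
 *		void *desc;
 *
 *		desc = dmam_alloc_attrs(&pdev->dev, SZ_4K, &desc_dma,
 *					GFP_KERNEL, 0);
 *		if (!desc)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * The buffer is released automatically when the driver detaches; an explicit
 * dmam_free_coherent() is only needed for early teardown.
 */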

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}
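
/*
 * Worked example of the bypass check in dma_go_direct() above, with
 * illustrative numbers: with CONFIG_DMA_OPS_BYPASS enabled and
 * dev->dma_ops_bypass set, a device with a 64-bit streaming mask and no
 * bus_dma_limit on a machine with 4 GiB of addressable RAM evaluates
 * min_not_zero(DMA_BIT_MASK(64), 0) == DMA_BIT_MASK(64), which covers
 * dma_direct_get_required_mask() (roughly DMA_BIT_MASK(32) here), so the
 * mapping bypasses the IOMMU ops and goes direct.  A 30-bit mask on the
 * same system would fail the comparison and keep using the ops.
 */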

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
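
/*
 * Illustrative sketch (not part of this file): dma_map_single() reaches
 * dma_map_page_attrs() above through the inline wrappers in dma-mapping.h.
 * A typical streaming mapping of a kmalloc'ed buffer, with the hypothetical
 * "foo" names standing in for driver specifics, looks like:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	foo_hw_start_tx(foo, dma, len);
 *	(wait for the device to finish with the buffer)
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * The error check matters because both the direct path and ops->map_page
 * can return DMA_MAPPING_ERROR, e.g. when bounce buffering fails.
 */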

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);
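
/*
 * Illustrative sketch (not part of this file): a scatter-gather mapping
 * must use the element count returned by dma_map_sg(), which may be smaller
 * than the count passed in if entries were merged.  Hypothetical driver
 * code, with "foo" names as placeholders:
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		foo_hw_add_segment(foo, sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the value returned
 * by dma_map_sg().
 */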

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
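
/*
 * Illustrative sketch (not part of this file): a long-lived streaming
 * mapping that is reused (for example a receive buffer handed back to the
 * hardware) must be synced around each CPU access instead of being remapped.
 * Hypothetical receive path:
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	foo_process_rx(foo, buf, len);
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 *
 * Ownership returns to the device after the second call; the CPU must not
 * touch the buffer again until the next dma_sync_single_for_cpu().
 */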

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatter-gather table.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
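
/*
 * Illustrative sketch (not part of this file): despite the caveats above,
 * the typical caller is a dma_buf exporter that turns a coherent allocation
 * into an sg_table for another device.  Hypothetical exporter code:
 *
 *	struct sg_table *sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
 *
 *	if (!sgt)
 *		return ERR_PTR(-ENOMEM);
 *	ret = dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
 *	if (ret) {
 *		kfree(sgt);
 *		return ERR_PTR(ret);
 *	}
 *
 * dma_get_sgtable() is the attrs == 0 wrapper around
 * dma_get_sgtable_attrs() above.
 */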

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
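
/*
 * Illustrative sketch (not part of this file): a driver exposing a coherent
 * buffer through its own mmap file operation would typically gate on
 * dma_can_mmap() at probe time and then forward the vma here.  The "foo"
 * names are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->buf,
 *					 priv->buf_dma, priv->buf_size);
 *	}
 *
 * dma_mmap_coherent() is the attrs == 0 wrapper around dma_mmap_attrs().
 */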

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask, we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
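
/*
 * Illustrative sketch (not part of this file): the unmanaged allocator is
 * normally paired with an explicit free on the teardown path.  With the
 * hypothetical "foo" descriptor ring below:
 *
 *	foo->ring = dma_alloc_attrs(dev, ring_size, &foo->ring_dma,
 *				    GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
 *	if (!foo->ring)
 *		return -ENOMEM;
 *
 * and on the teardown path:
 *
 *	dma_free_attrs(dev, ring_size, foo->ring, foo->ring_dma,
 *		       DMA_ATTR_WRITE_COMBINE);
 *
 * The same attrs value must be passed to the allocation and the free, since
 * dma_free_attrs() may need it to pick the matching release path.
 */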

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
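
/*
 * Illustrative sketch (not part of this file): most drivers set both masks
 * at probe time through the dma_set_mask_and_coherent() helper and fall
 * back to 32 bits if the wider mask is rejected:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 *
 * Both calls end up in dma_set_mask()/dma_set_coherent_mask() above and
 * therefore in dma_supported().
 */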

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_alloc_direct(dev, ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
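
/*
 * Illustrative sketch (not part of this file): block and SCSI drivers use
 * this limit to keep single requests within what SWIOTLB or the IOMMU can
 * map, for example when sizing a queue:
 *
 *	size_t max_bytes = min_t(size_t, dma_max_mapping_size(dev), SZ_1M);
 *
 *	blk_queue_max_hw_sectors(q, max_bytes >> SECTOR_SHIFT);
 *
 * The SZ_1M cap is a hypothetical driver-specific ceiling; the point is
 * that dma_max_mapping_size() bounds it further when bounce buffering is
 * in use.
 */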

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);