xref: /openbmc/linux/kernel/dma/mapping.c (revision e29ccc188f3dae1cb66f59e10e01e0f150642a54)
// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
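
/*
 * Illustrative usage sketch (not part of this file): a hypothetical driver
 * probe path can lean on the managed allocator so the buffer is torn down
 * automatically on driver detach.  "pdev", "priv" and RING_BYTES are
 * assumed, driver-local names.
 *
 *	priv->ring = dmam_alloc_attrs(&pdev->dev, RING_BYTES,
 *				      &priv->ring_dma, GFP_KERNEL, 0);
 *	if (!priv->ring)
 *		return -ENOMEM;
 *
 * No explicit dmam_free_coherent() call is needed on the error or remove
 * paths; dmam_release() above frees the buffer through devres.
 */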

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		unsigned long pfn;

		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		/* If the PFN is not valid, we do not have a struct page */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
		if (!pfn_valid(pfn))
			return -ENXIO;
		page = pfn_to_page(pfn);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		/* If the PFN is not valid, we do not have a struct page */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
		if (!pfn_valid(pfn))
			return -ENXIO;
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (IS_ENABLED(CONFIG_ARCH_NO_COHERENT_DMA_MMAP))
		return false;

	if (dma_is_direct(ops)) {
		return dev_is_dma_coherent(dev) ||
			IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN);
	}

	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
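
/*
 * Illustrative check (not part of this file): a hypothetical driver that
 * offers an optional mmap path for its coherent buffer can use
 * dma_can_mmap() to decide whether to advertise the mapping at all:
 *
 *	if (!dma_can_mmap(dev))
 *		return -ENXIO;
 *
 * so that a read()/copy based fallback can be offered instead.
 */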

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
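
/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * character-device ->mmap() handler exposing a coherent buffer it allocated
 * earlier ("foo_priv" and its fields are assumed, driver-local names):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_attrs(priv->dev, vma, priv->cpu_addr,
 *				      priv->dma_addr, priv->size, 0);
 *	}
 *
 * The buffer must stay allocated until the userspace mapping goes away, as
 * noted in the kernel-doc above.
 */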

static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
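
/*
 * Worked example of the fallback above (illustrative, assuming 4 KiB pages,
 * i.e. PAGE_SHIFT == 12):
 *
 *   - 4 GiB of RAM: max_pfn = 0x100000, so low_totalram = 0xfffff000 and
 *     high_totalram = 0; the mask rounds up to 0xffffffff (32 bits).
 *   - 8 GiB of RAM: max_pfn = 0x200000, so high_totalram = 1 and the mask
 *     becomes 0x1ffffffff (33 bits).
 */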

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
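
/*
 * Illustrative usage sketch (not part of this file): a hypothetical PCI
 * driver typically tries a wide mask first and falls back to 32 bits when
 * dma_set_mask() returns -EIO:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *
 * The coherent mask is usually set alongside it via dma_set_coherent_mask()
 * below.
 */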

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
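
/*
 * Illustrative usage sketch (not part of this file): callers that build
 * large single mappings, e.g. a hypothetical storage driver, can clamp
 * their per-request limit against this value ("hw_max_bytes" is an assumed
 * driver-local limit):
 *
 *	max_bytes = min_t(size_t, hw_max_bytes, dma_max_mapping_size(dev));
 *
 * This matters mainly when bounce buffering (e.g. swiotlb) restricts the
 * size the dma_map_* interfaces can handle.
 */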

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);