// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
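
/*
 * Example (illustrative sketch, not part of this file): typical use of
 * dmam_alloc_coherent() from a hypothetical driver's probe routine.  The
 * foo_* names and FOO_RING_SIZE are made up for this sketch.  Note that
 * no matching free call is needed; the buffer is released automatically
 * when the driver detaches.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->ring = dmam_alloc_coherent(&pdev->dev, FOO_RING_SIZE,
 *						 &priv->ring_dma, GFP_KERNEL);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */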

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
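
/*
 * Example (illustrative sketch): as above, but passing a DMA_ATTR_* flag.
 * A hypothetical frame buffer wanting a write-combining CPU mapping
 * might do:
 *
 *	fb->vaddr = dmam_alloc_attrs(dev, fb->size, &fb->dma_addr,
 *				     GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
 *	if (!fb->vaddr)
 *		return -ENOMEM;
 */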

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (!rc)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
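
/*
 * Example (illustrative sketch): a hypothetical driver handing a chunk
 * of device-local SRAM, described by a struct resource, to the coherent
 * allocator for the lifetime of the binding:
 *
 *	rc = dmam_declare_coherent_memory(dev, res->start, res->start,
 *					  resource_size(res),
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (rc)
 *		return rc;
 */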

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;

		page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
				dma_addr));
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
			attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
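
/*
 * Example (illustrative sketch): exporting a coherent allocation as a
 * single-entry scatterlist, e.g. for a dma-buf exporter.  cpu_addr,
 * dma_addr and size are assumed to come from a prior dma_alloc_attrs()
 * call; hand sgt.sgl to the importer, then call sg_free_table(&sgt):
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable_attrs(dev, &sgt, cpu_addr, dma_addr, size, 0);
 *	if (ret)
 *		return ret;
 */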

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dma_is_direct(ops) && ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
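
/*
 * Example (illustrative sketch): a hypothetical character device wiring
 * its mmap() handler straight to dma_mmap_attrs().  priv->vaddr,
 * priv->dma_addr and priv->size describe an earlier dma_alloc_attrs()
 * allocation:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_attrs(priv->dev, vma, priv->vaddr,
 *				      priv->dma_addr, priv->size, 0);
 *	}
 */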

#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
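
/*
 * Worked example (assuming 4k pages, PAGE_SHIFT == 12): with 2 GiB of
 * RAM, max_pfn is 0x80000, so low_totalram is 0x7ffff000 and
 * high_totalram is 0.  fls(0x7ffff000) is 31, so low_totalram becomes
 * 0x40000000 + 0x3fffffff and the returned mask is 0x7fffffff: a device
 * needs 31 addressable bits to reach all of memory.
 */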

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
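
/*
 * Example (illustrative sketch): the unmanaged allocate/free pairing
 * that the dmam_* wrappers earlier in this file automate.  With
 * attrs == 0 this is what the dma_alloc_coherent()/dma_free_coherent()
 * shorthands expand to; use the buffer between the two calls:
 *
 *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, 0);
 *	if (!buf)
 *		return -ENOMEM;
 *	dma_free_attrs(dev, size, buf, dma_handle, 0);
 */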

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifndef HAVE_ARCH_DMA_SET_MASK
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#endif

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
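
/*
 * Example (illustrative sketch): a hypothetical probe routine
 * negotiating addressing capability, trying 64 bits first and falling
 * back to 32.  Most drivers use the dma_set_mask_and_coherent() helper,
 * which sets both the streaming and the coherent mask:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */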

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);
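
/*
 * Example (illustrative sketch): dma_cache_sync() pairs with memory
 * allocated using DMA_ATTR_NON_CONSISTENT, which may be non-coherent.
 * Fill vaddr with data for the device, sync, then start the transfer:
 *
 *	vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *				DMA_ATTR_NON_CONSISTENT);
 *	dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);
 */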