// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
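
/*
 * Usage sketch (an illustration, not part of the original file): a driver
 * that manages its own buffer would typically pair this call with
 * snd_dma_free_pages().  The device pointer "dev" and the 64 KiB size are
 * assumed example values.
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
 *				      DMA_BIDIRECTIONAL, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	... program buf.addr (device address), access buf.area (CPU address) ...
 *	snd_dma_free_pages(&buf);
 */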

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
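
/*
 * Hedged usage note (illustrative, reusing the buf/err declarations from the
 * sketch above): because the fallback helper may shrink the request, callers
 * should read the size actually obtained back from dmab->bytes instead of
 * assuming the requested size.
 *
 *	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *					   1024 * 1024, &buf);
 *	if (!err)
 *		pr_debug("got %zu bytes\n", buf.bytes);
 */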

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically upon device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
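
/*
 * Usage sketch (illustrative only): with the devres-managed variant there is
 * no explicit free; the buffer is released together with the device.  The
 * device pointer and size below are assumed example values.
 *
 *	struct snd_dma_buffer *dmab;
 *
 *	dmab = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
 *					DMA_TO_DEVICE, 32 * 1024);
 *	if (!dmab)
 *		return -ENOMEM;
 *	... dmab->area / dmab->addr stay valid until the device goes away ...
 */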

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
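
/*
 * Sketch of a typical call site (assumed, not taken from this file): a PCM
 * mmap callback can forward to this helper with the buffer attached to the
 * substream; snd_pcm_get_dma_buf() is assumed here as the accessor.
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
 *	}
 */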

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
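
/*
 * Usage sketch (illustrative): for buffer types that set dev.need_sync
 * (e.g. SNDRV_DMA_TYPE_NONCONTIG on non-coherent platforms), a driver brackets
 * the data transfer with syncs; SNDRV_DMA_SYNC_DEVICE is the counterpart mode
 * from <sound/memalloc.h>.
 *
 *	// CPU has written samples, hand them over to the device:
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 *	// device has filled the buffer, make it visible to the CPU:
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 */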

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
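
/*
 * Sketch (illustrative, with assumed names): the helpers above are meant to
 * be used together when programming hardware SG descriptors, walking the
 * buffer in physically contiguous chunks.  write_descriptor() and "hw" are
 * hypothetical driver-side pieces.
 *
 *	unsigned int ofs = 0, chunk;
 *
 *	while (ofs < dmab->bytes) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, dmab->bytes - ofs);
 *		write_descriptor(hw, snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *	}
 */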

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_X86
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}


static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if the allocation from it fails, fall back to the standard
	 * device pages.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif /* CONFIG_SND_DMA_SGBUF */

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

#ifdef CONFIG_SND_DMA_SGBUF
	if (cpu_feature_enabled(X86_FEATURE_XENPV))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
#ifdef CONFIG_SND_DMA_SGBUF
	if (!sgt && !get_dma_ops(dmab->dev.dev))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
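
/*
 * Note (a sketch under assumptions, not from this file): PCM drivers rarely
 * call these ops directly; they usually pick the buffer type when setting up
 * managed buffers, e.g. with snd_pcm_set_managed_buffer_all(), and the core
 * then dispatches into snd_dma_noncontig_ops via snd_dma_get_ops().  The PCI
 * device and sizes below are assumed example values.
 *
 *	snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_NONCONTIG,
 *				       &pci->dev, 64 * 1024, 0);
 */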

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	if (!p)
		return NULL;
	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
		return p;
	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wc(sg_wc_address(&iter), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wb(sg_wc_address(&iter), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	bool use_dma_alloc_coherent;
	size_t count;
	struct page **pages;
	/* DMA address array; the first page contains #pages in ~PAGE_MASK */
	dma_addr_t *addrs;
};
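
/*
 * Layout illustration (an assumed example with 4 KiB pages, not from the
 * original source): a two-page chunk at DMA address 0x12340000 is recorded as
 *
 *	addrs[i]     = 0x12340000 | 2;	first page: address | chunk page count
 *	addrs[i + 1] = 0x12341000;	following pages: address only
 *
 * __snd_dma_sg_fallback_free() reads the count back from the low bits to
 * release the buffer chunk by chunk.
 */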

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i, size;

	if (sgbuf->pages && sgbuf->addrs) {
		i = 0;
		while (i < sgbuf->count) {
			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
				break;
			size = sgbuf->addrs[i] & ~PAGE_MASK;
			if (WARN_ON(!size))
				break;
			if (sgbuf->use_dma_alloc_coherent)
				dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
						  page_address(sgbuf->pages[i]),
						  sgbuf->addrs[i] & PAGE_MASK);
			else
				do_free_pages(page_address(sgbuf->pages[i]),
					      size << PAGE_SHIFT, false);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk, npages;
	dma_addr_t *addrp;
	dma_addr_t addr;
	void *p;

	/* correct the type */
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
	else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->addrs)
		goto error;

	pagep = sgbuf->pages;
	addrp = sgbuf->addrs;
	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
	while (size > 0) {
		chunk = min(size, chunk);
		if (sgbuf->use_dma_alloc_coherent)
			p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
		else
			p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		*addrp = npages; /* store in lower bits */
		curp = virt_to_page(p);
		while (npages--) {
			*pagep++ = curp++;
			*addrp++ |= addr;
			addr += PAGE_SIZE;
		}
	}

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(sgbuf->pages, sgbuf->count);

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
	size_t index = offset >> PAGE_SHIFT;

	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
	.alloc = snd_dma_sg_fallback_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	.get_addr = snd_dma_sg_fallback_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}