// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* the dev pointer carries the GFP flags for the CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);

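/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * with a real struct device could allocate and later release a 64 KiB
 * bidirectional DMA buffer roughly like this; the device pointer, size and
 * error handling are assumptions.
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, card->dev,
 *				      DMA_BIDIRECTIONAL, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	// ... program the hardware with buf.addr, access the CPU side via buf.area ...
 *	snd_dma_free_pages(&buf);
 */
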
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if a buffer (possibly smaller than requested) is allocated
 * successfully, otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		/* halve the size, then round it up to a power-of-two page count */
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);

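/*
 * Usage sketch (illustrative only): since the fallback variant may shrink the
 * allocation, callers must use dmab->bytes rather than the requested size
 * afterwards.  The preferred size and device pointer below are assumptions.
 *
 *	struct snd_dma_buffer buf;
 *
 *	if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, card->dev,
 *					 1024 * 1024, &buf) < 0)
 *		return -ENOMEM;
 *	// buf.bytes now holds the size actually obtained (<= 1 MiB)
 */
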
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage them via
 * devres.  The pages will be released automatically when the device is
 * removed.
 *
 * Unlike snd_dma_alloc_pages(), this function requires a real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC buffer types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure.
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);

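/*
 * Usage sketch (illustrative only): in a probe callback, the buffer is bound
 * to the device lifetime, so no explicit free is needed.  "pdev" and the
 * error path are assumptions.
 *
 *	struct snd_dma_buffer *dmab;
 *
 *	dmab = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
 *					DMA_TO_DEVICE, 256 * 1024);
 *	if (!dmab)
 *		return -ENOMEM;
 *	// dmab is released automatically by devres on device removal
 */
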
/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */

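/*
 * Usage sketch (illustrative only): for buffer types that set
 * dmab->dev.need_sync (e.g. SNDRV_DMA_TYPE_NONCONTIG on non-coherent
 * systems), the CPU and device views have to be synchronized around each
 * transfer; the call sites below are assumptions.
 *
 *	// after the CPU has written new audio data, before the device reads it
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 *
 *	// after the device has written captured data, before the CPU reads it
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 */
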
/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on an sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);

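/*
 * Usage sketch (illustrative only): a driver programming a hardware
 * scatter-gather list can walk the buffer in physically contiguous chunks;
 * the descriptor-programming helper named below is hypothetical.
 *
 *	unsigned int ofs = 0, rest = dmab->bytes;
 *
 *	while (rest) {
 *		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);
 *
 *		my_hw_add_descriptor(chip, addr, chunk);	// hypothetical
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 */
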
/*
 * Continuous pages allocator
 */
static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
	void *p = alloc_pages_exact(size, gfp);

	if (p)
		dmab->addr = page_to_phys(virt_to_page(p));
	return p;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

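/*
 * Note (editorial, based on snd_mem_get_gfp_flags() above): for
 * SNDRV_DMA_TYPE_CONTINUOUS the "device" argument is not a real device but
 * may encode the GFP flags to use; passing NULL falls back to GFP_KERNEL.
 * The explicit cast below mirrors the decoding above and is shown only as an
 * assumption about the caller-side convention.
 *
 *	snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *			    (struct device *)(__force unsigned long)GFP_KERNEL,
 *			    size, &buf);
 */
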
/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* The internal memory (IRAM) may be too small or already exhausted,
	 * so if the allocation above fails, fall back to ordinary device
	 * pages.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

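/*
 * Usage sketch (illustrative only): a DT-based platform driver whose device
 * node carries an "iram" phandle can request on-chip memory and silently
 * fall back to ordinary device pages when the pool is missing or exhausted;
 * the device pointer and variables below are assumptions.
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_IRAM, &pdev->dev,
 *				      DMA_BIDIRECTIONAL, size, &buf);
 *	// on fallback, buf.dev.type is rewritten to SNDRV_DMA_TYPE_DEV
 */
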
#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
#ifdef CONFIG_X86
	if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return p;
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
#ifdef CONFIG_X86
/* On x86, share the same ops as the standard dev ops */
#define snd_dma_wc_ops	snd_dma_dev_ops
#else /* CONFIG_X86 */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};
#endif /* CONFIG_X86 */

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt)
		return NULL;
	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p)
		dmab->private_data = sgt;
	else
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	/* re-use vmalloc helpers for get_* ops */
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	size_t ofs;

	if (!p)
		return NULL;
	for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
		set_memory_uc(vmalloc_to_virt(p + ofs), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	size_t ofs;

	for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
		set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	/* FIXME: dma_mmap_noncontiguous() works? */
	return -ENOENT; /* continue with the default mmap handler */
}

const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
	return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				     dmab->dev.dir, DEFAULT_GFP);
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}