xref: /openbmc/linux/sound/core/memalloc.c (revision 12382d11)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
4  *                   Takashi Iwai <tiwai@suse.de>
5  *
6  *  Generic memory allocators
7  */
8 
9 #include <linux/slab.h>
10 #include <linux/mm.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/genalloc.h>
13 #include <linux/highmem.h>
14 #include <linux/vmalloc.h>
15 #ifdef CONFIG_X86
16 #include <asm/set_memory.h>
17 #endif
18 #include <sound/memalloc.h>
19 #include "memalloc_local.h"
20 
21 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
22 
23 #ifdef CONFIG_SND_DMA_SGBUF
24 static void *do_alloc_fallback_pages(struct device *dev, size_t size,
25 				     dma_addr_t *addr, bool wc);
26 static void do_free_fallback_pages(void *p, size_t size, bool wc);
27 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
28 #endif
29 
30 /* decode the gfp flags encoded in the dev pointer; used by the CONTINUOUS and VMALLOC types */
31 static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
32 					  gfp_t default_gfp)
33 {
34 	if (!dmab->dev.dev)
35 		return default_gfp;
36 	else
37 		return (__force gfp_t)(unsigned long)dmab->dev.dev;
38 }
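
/*
 * Editorial sketch (not part of the original file): for the CONTINUOUS and
 * VMALLOC types, a caller that wants non-default gfp flags encodes them into
 * the "device" argument, mirroring the decode above.  The helper name below
 * is hypothetical.
 */
static inline struct device *example_gfp_to_snd_dma_dev(gfp_t gfp)
{
	/* inverse of snd_mem_get_gfp_flags(): stash the gfp bits in a fake dev pointer */
	return (struct device *)(__force unsigned long)gfp;
}
/* e.g.: snd_dma_alloc_pages(SNDRV_DMA_TYPE_VMALLOC,
 *			     example_gfp_to_snd_dma_dev(GFP_KERNEL | __GFP_HIGHMEM),
 *			     size, dmab);
 */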
39 
40 static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
41 {
42 	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
43 
44 	if (WARN_ON_ONCE(!ops || !ops->alloc))
45 		return NULL;
46 	return ops->alloc(dmab, size);
47 }
48 
49 /**
50  * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
51  *	type and direction
52  * @type: the DMA buffer type
53  * @device: the device pointer
54  * @dir: DMA direction
55  * @size: the buffer size to allocate
56  * @dmab: buffer allocation record to store the allocated data
57  *
58  * Calls the memory-allocator function for the corresponding
59  * buffer type.
60  *
61  * Return: Zero if the buffer of the given size is allocated successfully,
62  * or a negative error code on failure.
63  */
64 int snd_dma_alloc_dir_pages(int type, struct device *device,
65 			    enum dma_data_direction dir, size_t size,
66 			    struct snd_dma_buffer *dmab)
67 {
68 	if (WARN_ON(!size))
69 		return -ENXIO;
70 	if (WARN_ON(!dmab))
71 		return -ENXIO;
72 
73 	size = PAGE_ALIGN(size);
74 	dmab->dev.type = type;
75 	dmab->dev.dev = device;
76 	dmab->dev.dir = dir;
77 	dmab->bytes = 0;
78 	dmab->addr = 0;
79 	dmab->private_data = NULL;
80 	dmab->area = __snd_dma_alloc_pages(dmab, size);
81 	if (!dmab->area)
82 		return -ENOMEM;
83 	dmab->bytes = size;
84 	return 0;
85 }
86 EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
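
/*
 * Editorial usage sketch (not part of the original file): a driver typically
 * allocates a device-visible buffer once and frees it on teardown.  The
 * device, size and direction below are placeholder values.
 */
static int example_alloc_dma_buffer(struct device *dev,
				    struct snd_dma_buffer *buf)
{
	int err;

	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
				      DMA_BIDIRECTIONAL, 64 * 1024, buf);
	if (err < 0)
		return err;
	/* buf->area is the CPU mapping, buf->addr the DMA address */
	memset(buf->area, 0, buf->bytes);
	return 0;
}

static void example_free_dma_buffer(struct snd_dma_buffer *buf)
{
	snd_dma_free_pages(buf);
}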
87 
88 /**
89  * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
90  * @type: the DMA buffer type
91  * @device: the device pointer
92  * @size: the buffer size to allocate
93  * @dmab: buffer allocation record to store the allocated data
94  *
95  * Calls the memory-allocator function for the corresponding
96  * buffer type.  When the allocation fails, this function halves the size
97  * and tries again, down to a single page.  The size actually allocated
98  * is stored in dmab->bytes.
99  *
100  * Return: Zero if a buffer is allocated successfully,
101  * or a negative error code on failure.
102  */
103 int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
104 				 struct snd_dma_buffer *dmab)
105 {
106 	int err;
107 
108 	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
109 		if (err != -ENOMEM)
110 			return err;
111 		if (size <= PAGE_SIZE)
112 			return -ENOMEM;
113 		size >>= 1;
114 		size = PAGE_SIZE << get_order(size);
115 	}
116 	if (!dmab->area)
117 		return -ENOMEM;
118 	return 0;
119 }
120 EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
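
/*
 * Editorial sketch (not part of the original file): with the fallback
 * variant the caller must check dmab->bytes afterwards, because the
 * allocator may have shrunk the request.  The preferred size is a
 * placeholder value.
 */
static int example_alloc_best_effort(struct device *dev,
				     struct snd_dma_buffer *buf)
{
	size_t preferred = 512 * 1024;
	int err;

	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
					   preferred, buf);
	if (err < 0)
		return err;
	if (buf->bytes < preferred)
		dev_dbg(dev, "shrunk DMA buffer: %zu -> %zu bytes\n",
			preferred, buf->bytes);
	return 0;
}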
121 
122 /**
123  * snd_dma_free_pages - release the allocated buffer
124  * @dmab: the buffer allocation record to release
125  *
126  * Releases the buffer previously allocated via snd_dma_alloc_pages().
127  */
128 void snd_dma_free_pages(struct snd_dma_buffer *dmab)
129 {
130 	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
131 
132 	if (ops && ops->free)
133 		ops->free(dmab);
134 }
135 EXPORT_SYMBOL(snd_dma_free_pages);
136 
137 /* called by devres */
138 static void __snd_release_pages(struct device *dev, void *res)
139 {
140 	snd_dma_free_pages(res);
141 }
142 
143 /**
144  * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
145  * @dev: the device pointer
146  * @type: the DMA buffer type
147  * @dir: DMA direction
148  * @size: the buffer size to allocate
149  *
150  * Allocate buffer pages of the given type and manage them via devres.
151  * The pages are released automatically when the device is unbound.
152  *
153  * Unlike snd_dma_alloc_pages(), this function requires a real device pointer,
154  * hence it can't be used with the SNDRV_DMA_TYPE_CONTINUOUS or
155  * SNDRV_DMA_TYPE_VMALLOC types.
156  *
157  * Return: the snd_dma_buffer object on success, or NULL on failure
158  */
159 struct snd_dma_buffer *
160 snd_devm_alloc_dir_pages(struct device *dev, int type,
161 			 enum dma_data_direction dir, size_t size)
162 {
163 	struct snd_dma_buffer *dmab;
164 	int err;
165 
166 	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
167 		    type == SNDRV_DMA_TYPE_VMALLOC))
168 		return NULL;
169 
170 	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
171 	if (!dmab)
172 		return NULL;
173 
174 	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
175 	if (err < 0) {
176 		devres_free(dmab);
177 		return NULL;
178 	}
179 
180 	devres_add(dev, dmab);
181 	return dmab;
182 }
183 EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
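
/*
 * Editorial sketch (not part of the original file): devres-managed
 * allocation in a probe path; no explicit free is required since the
 * buffer is released when the device is unbound.
 */
static int example_probe_alloc(struct device *dev)
{
	struct snd_dma_buffer *buf;

	buf = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
				       DMA_BIDIRECTIONAL, 32 * 1024);
	if (!buf)
		return -ENOMEM;
	/* use buf->area / buf->addr for the lifetime of the device */
	return 0;
}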
184 
185 /**
186  * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
187  * @dmab: buffer allocation information
188  * @area: VM area information
189  *
190  * Return: zero if successful, or a negative error code
191  */
192 int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
193 			struct vm_area_struct *area)
194 {
195 	const struct snd_malloc_ops *ops;
196 
197 	if (!dmab)
198 		return -ENOENT;
199 	ops = snd_dma_get_ops(dmab);
200 	if (ops && ops->mmap)
201 		return ops->mmap(dmab, area);
202 	else
203 		return -ENOENT;
204 }
205 EXPORT_SYMBOL(snd_dma_buffer_mmap);
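
/*
 * Editorial sketch (not part of the original file): a driver mmap handler
 * can simply forward to snd_dma_buffer_mmap() after validating the
 * requested range against the allocated size.
 */
static int example_mmap_buffer(struct snd_dma_buffer *buf,
			       struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start > buf->bytes)
		return -EINVAL;
	return snd_dma_buffer_mmap(buf, vma);
}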
206 
207 #ifdef CONFIG_HAS_DMA
208 /**
209  * snd_dma_buffer_sync - sync DMA buffer between CPU and device
210  * @dmab: buffer allocation information
211  * @mode: sync mode
212  */
213 void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
214 			 enum snd_dma_sync_mode mode)
215 {
216 	const struct snd_malloc_ops *ops;
217 
218 	if (!dmab || !dmab->dev.need_sync)
219 		return;
220 	ops = snd_dma_get_ops(dmab);
221 	if (ops && ops->sync)
222 		ops->sync(dmab, mode);
223 }
224 EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
225 #endif /* CONFIG_HAS_DMA */
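
/*
 * Editorial sketch (not part of the original file): buffer types that set
 * dmab->dev.need_sync (e.g. the non-contiguous and non-coherent types)
 * require CPU accesses to be bracketed by sync calls; for other types the
 * helper is a no-op.
 */
static void example_copy_from_device(struct snd_dma_buffer *buf,
				     void *dst, size_t len)
{
	/* make data written by the device visible to the CPU ... */
	snd_dma_buffer_sync(buf, SNDRV_DMA_SYNC_CPU);
	memcpy(dst, buf->area, len);
	/* ... then hand the buffer ownership back to the device */
	snd_dma_buffer_sync(buf, SNDRV_DMA_SYNC_DEVICE);
}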
226 
227 /**
228  * snd_sgbuf_get_addr - return the DMA address at the corresponding offset
229  * @dmab: buffer allocation information
230  * @offset: offset in the ring buffer
231  *
232  * Return: the DMA address
233  */
234 dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
235 {
236 	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
237 
238 	if (ops && ops->get_addr)
239 		return ops->get_addr(dmab, offset);
240 	else
241 		return dmab->addr + offset;
242 }
243 EXPORT_SYMBOL(snd_sgbuf_get_addr);
244 
245 /**
246  * snd_sgbuf_get_page - return the physical page at the corresponding offset
247  * @dmab: buffer allocation information
248  * @offset: offset in the ring buffer
249  *
250  * Return: the page pointer
251  */
252 struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
253 {
254 	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
255 
256 	if (ops && ops->get_page)
257 		return ops->get_page(dmab, offset);
258 	else
259 		return virt_to_page(dmab->area + offset);
260 }
261 EXPORT_SYMBOL(snd_sgbuf_get_page);
262 
263 /**
264  * snd_sgbuf_get_chunk_size - compute the max chunk size of physically
265  *	contiguous pages on an SG buffer
266  * @dmab: buffer allocation information
267  * @ofs: offset in the ring buffer
268  * @size: the requested size
269  *
270  * Return: the chunk size
271  */
272 unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
273 				      unsigned int ofs, unsigned int size)
274 {
275 	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
276 
277 	if (ops && ops->get_chunk_size)
278 		return ops->get_chunk_size(dmab, ofs, size);
279 	else
280 		return size;
281 }
282 EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
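
/*
 * Editorial sketch (not part of the original file): walking an SG buffer in
 * maximal physically contiguous chunks, e.g. for building a hardware
 * descriptor list; here each chunk is only logged.
 */
static void example_walk_chunks(struct snd_dma_buffer *buf)
{
	unsigned int ofs = 0, chunk;
	dma_addr_t addr;

	while (ofs < buf->bytes) {
		addr = snd_sgbuf_get_addr(buf, ofs);
		chunk = snd_sgbuf_get_chunk_size(buf, ofs, buf->bytes - ofs);
		if (!chunk)
			break;
		pr_debug("chunk at %pad, %u bytes\n", &addr, chunk);
		ofs += chunk;
	}
}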
283 
284 /*
285  * Continuous pages allocator
286  */
287 static void *do_alloc_pages(size_t size, dma_addr_t *addr, gfp_t gfp)
288 {
289 	void *p = alloc_pages_exact(size, gfp);
290 
291 	if (p)
292 		*addr = page_to_phys(virt_to_page(p));
293 	return p;
294 }
295 
296 static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
297 {
298 	return do_alloc_pages(size, &dmab->addr,
299 			      snd_mem_get_gfp_flags(dmab, GFP_KERNEL));
300 }
301 
302 static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
303 {
304 	free_pages_exact(dmab->area, dmab->bytes);
305 }
306 
307 static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
308 				   struct vm_area_struct *area)
309 {
310 	return remap_pfn_range(area, area->vm_start,
311 			       dmab->addr >> PAGE_SHIFT,
312 			       area->vm_end - area->vm_start,
313 			       area->vm_page_prot);
314 }
315 
316 static const struct snd_malloc_ops snd_dma_continuous_ops = {
317 	.alloc = snd_dma_continuous_alloc,
318 	.free = snd_dma_continuous_free,
319 	.mmap = snd_dma_continuous_mmap,
320 };
321 
322 /*
323  * VMALLOC allocator
324  */
325 static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
326 {
327 	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
328 
329 	return __vmalloc(size, gfp);
330 }
331 
332 static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
333 {
334 	vfree(dmab->area);
335 }
336 
337 static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
338 				struct vm_area_struct *area)
339 {
340 	return remap_vmalloc_range(area, dmab->area, 0);
341 }
342 
343 #define get_vmalloc_page_addr(dmab, offset) \
344 	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
345 
346 static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
347 					   size_t offset)
348 {
349 	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
350 }
351 
352 static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
353 					     size_t offset)
354 {
355 	return vmalloc_to_page(dmab->area + offset);
356 }
357 
358 static unsigned int
359 snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
360 			       unsigned int ofs, unsigned int size)
361 {
362 	unsigned int start, end;
363 	unsigned long addr;
364 
365 	start = ALIGN_DOWN(ofs, PAGE_SIZE);
366 	end = ofs + size - 1; /* the last byte address */
367 	/* check page continuity */
368 	addr = get_vmalloc_page_addr(dmab, start);
369 	for (;;) {
370 		start += PAGE_SIZE;
371 		if (start > end)
372 			break;
373 		addr += PAGE_SIZE;
374 		if (get_vmalloc_page_addr(dmab, start) != addr)
375 			return start - ofs;
376 	}
377 	/* ok, all on contiguous pages */
378 	return size;
379 }
380 
381 static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
382 	.alloc = snd_dma_vmalloc_alloc,
383 	.free = snd_dma_vmalloc_free,
384 	.mmap = snd_dma_vmalloc_mmap,
385 	.get_addr = snd_dma_vmalloc_get_addr,
386 	.get_page = snd_dma_vmalloc_get_page,
387 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
388 };
389 
390 #ifdef CONFIG_HAS_DMA
391 /*
392  * IRAM allocator
393  */
394 #ifdef CONFIG_GENERIC_ALLOCATOR
395 static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
396 {
397 	struct device *dev = dmab->dev.dev;
398 	struct gen_pool *pool;
399 	void *p;
400 
401 	if (dev->of_node) {
402 		pool = of_gen_pool_get(dev->of_node, "iram", 0);
403 		/* Assign the pool to the private_data field */
404 		dmab->private_data = pool;
405 
406 		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
407 		if (p)
408 			return p;
409 	}
410 
411 	/* Internal memory might be limited in size and have no space left,
412 	 * so if the pool allocation fails, fall back to normal device pages.
413 	 */
414 	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
415 	return __snd_dma_alloc_pages(dmab, size);
416 }
417 
418 static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
419 {
420 	struct gen_pool *pool = dmab->private_data;
421 
422 	if (pool && dmab->area)
423 		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
424 }
425 
426 static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
427 			     struct vm_area_struct *area)
428 {
429 	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
430 	return remap_pfn_range(area, area->vm_start,
431 			       dmab->addr >> PAGE_SHIFT,
432 			       area->vm_end - area->vm_start,
433 			       area->vm_page_prot);
434 }
435 
436 static const struct snd_malloc_ops snd_dma_iram_ops = {
437 	.alloc = snd_dma_iram_alloc,
438 	.free = snd_dma_iram_free,
439 	.mmap = snd_dma_iram_mmap,
440 };
441 #endif /* CONFIG_GENERIC_ALLOCATOR */
442 
443 #define DEFAULT_GFP \
444 	(GFP_KERNEL | \
445 	 __GFP_COMP |    /* compound page lets parts be mapped */ \
446 	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
447 	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */
448 
449 /*
450  * Coherent device pages allocator
451  */
452 static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
453 {
454 	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
455 }
456 
457 static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
458 {
459 	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
460 }
461 
462 static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
463 			    struct vm_area_struct *area)
464 {
465 	return dma_mmap_coherent(dmab->dev.dev, area,
466 				 dmab->area, dmab->addr, dmab->bytes);
467 }
468 
469 static const struct snd_malloc_ops snd_dma_dev_ops = {
470 	.alloc = snd_dma_dev_alloc,
471 	.free = snd_dma_dev_free,
472 	.mmap = snd_dma_dev_mmap,
473 };
474 
475 /*
476  * Write-combined pages
477  */
478 /* x86-specific allocations */
479 #ifdef CONFIG_SND_DMA_SGBUF
480 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
481 {
482 	return do_alloc_fallback_pages(dmab->dev.dev, size, &dmab->addr, true);
483 }
484 
485 static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
486 {
487 	do_free_fallback_pages(dmab->area, dmab->bytes, true);
488 }
489 
490 static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
491 			   struct vm_area_struct *area)
492 {
493 	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
494 	return snd_dma_continuous_mmap(dmab, area);
495 }
496 #else
497 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
498 {
499 	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
500 }
501 
502 static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
503 {
504 	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
505 }
506 
507 static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
508 			   struct vm_area_struct *area)
509 {
510 	return dma_mmap_wc(dmab->dev.dev, area,
511 			   dmab->area, dmab->addr, dmab->bytes);
512 }
513 #endif /* CONFIG_SND_DMA_SGBUF */
514 
515 static const struct snd_malloc_ops snd_dma_wc_ops = {
516 	.alloc = snd_dma_wc_alloc,
517 	.free = snd_dma_wc_free,
518 	.mmap = snd_dma_wc_mmap,
519 };
520 
521 /*
522  * Non-contiguous pages allocator
523  */
524 static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
525 {
526 	struct sg_table *sgt;
527 	void *p;
528 
529 	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
530 				      DEFAULT_GFP, 0);
531 	if (!sgt) {
532 #ifdef CONFIG_SND_DMA_SGBUF
533 		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
534 			dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
535 		else
536 			dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
537 		return snd_dma_sg_fallback_alloc(dmab, size);
538 #else
539 		return NULL;
540 #endif
541 	}
542 
543 	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
544 					    sg_dma_address(sgt->sgl));
545 	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
546 	if (p)
547 		dmab->private_data = sgt;
548 	else
549 		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
550 	return p;
551 }
552 
553 static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
554 {
555 	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
556 	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
557 			       dmab->dev.dir);
558 }
559 
560 static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
561 				  struct vm_area_struct *area)
562 {
563 	return dma_mmap_noncontiguous(dmab->dev.dev, area,
564 				      dmab->bytes, dmab->private_data);
565 }
566 
567 static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
568 				   enum snd_dma_sync_mode mode)
569 {
570 	if (mode == SNDRV_DMA_SYNC_CPU) {
571 		if (dmab->dev.dir == DMA_TO_DEVICE)
572 			return;
573 		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
574 		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
575 					 dmab->dev.dir);
576 	} else {
577 		if (dmab->dev.dir == DMA_FROM_DEVICE)
578 			return;
579 		flush_kernel_vmap_range(dmab->area, dmab->bytes);
580 		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
581 					    dmab->dev.dir);
582 	}
583 }
584 
585 static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
586 					      struct sg_page_iter *piter,
587 					      size_t offset)
588 {
589 	struct sg_table *sgt = dmab->private_data;
590 
591 	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
592 			     offset >> PAGE_SHIFT);
593 }
594 
595 static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
596 					     size_t offset)
597 {
598 	struct sg_dma_page_iter iter;
599 
600 	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
601 	__sg_page_iter_dma_next(&iter);
602 	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
603 }
604 
605 static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
606 					       size_t offset)
607 {
608 	struct sg_page_iter iter;
609 
610 	snd_dma_noncontig_iter_set(dmab, &iter, offset);
611 	__sg_page_iter_next(&iter);
612 	return sg_page_iter_page(&iter);
613 }
614 
615 static unsigned int
616 snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
617 				 unsigned int ofs, unsigned int size)
618 {
619 	struct sg_dma_page_iter iter;
620 	unsigned int start, end;
621 	unsigned long addr;
622 
623 	start = ALIGN_DOWN(ofs, PAGE_SIZE);
624 	end = ofs + size - 1; /* the last byte address */
625 	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
626 	if (!__sg_page_iter_dma_next(&iter))
627 		return 0;
628 	/* check page continuity */
629 	addr = sg_page_iter_dma_address(&iter);
630 	for (;;) {
631 		start += PAGE_SIZE;
632 		if (start > end)
633 			break;
634 		addr += PAGE_SIZE;
635 		if (!__sg_page_iter_dma_next(&iter) ||
636 		    sg_page_iter_dma_address(&iter) != addr)
637 			return start - ofs;
638 	}
639 	/* ok, all on contiguous pages */
640 	return size;
641 }
642 
643 static const struct snd_malloc_ops snd_dma_noncontig_ops = {
644 	.alloc = snd_dma_noncontig_alloc,
645 	.free = snd_dma_noncontig_free,
646 	.mmap = snd_dma_noncontig_mmap,
647 	.sync = snd_dma_noncontig_sync,
648 	.get_addr = snd_dma_noncontig_get_addr,
649 	.get_page = snd_dma_noncontig_get_page,
650 	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
651 };
652 
653 /* x86-specific SG-buffer with WC pages */
654 #ifdef CONFIG_SND_DMA_SGBUF
655 #define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))
656 
657 static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
658 {
659 	void *p = snd_dma_noncontig_alloc(dmab, size);
660 	struct sg_table *sgt = dmab->private_data;
661 	struct sg_page_iter iter;
662 
663 	if (!p)
664 		return NULL;
665 	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
666 		return p;
667 	for_each_sgtable_page(sgt, &iter, 0)
668 		set_memory_wc(sg_wc_address(&iter), 1);
669 	return p;
670 }
671 
672 static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
673 {
674 	struct sg_table *sgt = dmab->private_data;
675 	struct sg_page_iter iter;
676 
677 	for_each_sgtable_page(sgt, &iter, 0)
678 		set_memory_wb(sg_wc_address(&iter), 1);
679 	snd_dma_noncontig_free(dmab);
680 }
681 
682 static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
683 			      struct vm_area_struct *area)
684 {
685 	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
686 	return dma_mmap_noncontiguous(dmab->dev.dev, area,
687 				      dmab->bytes, dmab->private_data);
688 }
689 
690 static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
691 	.alloc = snd_dma_sg_wc_alloc,
692 	.free = snd_dma_sg_wc_free,
693 	.mmap = snd_dma_sg_wc_mmap,
694 	.sync = snd_dma_noncontig_sync,
695 	.get_addr = snd_dma_noncontig_get_addr,
696 	.get_page = snd_dma_noncontig_get_page,
697 	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
698 };
699 
700 /* manual page allocations with wc setup */
701 static void *do_alloc_fallback_pages(struct device *dev, size_t size,
702 				     dma_addr_t *addr, bool wc)
703 {
704 	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
705 	void *p;
706 
707  again:
708 	p = do_alloc_pages(size, addr, gfp);
709 	if (!p || (*addr + size - 1) & ~dev->coherent_dma_mask) {
710 		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
711 			gfp |= GFP_DMA32;
712 			goto again;
713 		}
714 		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
715 			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
716 			goto again;
717 		}
718 	}
719 	if (p && wc)
720 		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
721 	return p;
722 }
723 
724 static void do_free_fallback_pages(void *p, size_t size, bool wc)
725 {
726 	if (wc)
727 		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
728 	free_pages_exact(p, size);
729 }
730 
731 /* Fallback SG-buffer allocations for x86 */
732 struct snd_dma_sg_fallback {
733 	size_t count;
734 	struct page **pages;
735 	dma_addr_t *addrs;
736 };
737 
738 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
739 				       struct snd_dma_sg_fallback *sgbuf)
740 {
741 	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
742 	size_t i;
743 
744 	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
745 		do_free_fallback_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
746 	kvfree(sgbuf->pages);
747 	kvfree(sgbuf->addrs);
748 	kfree(sgbuf);
749 }
750 
751 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
752 {
753 	struct snd_dma_sg_fallback *sgbuf;
754 	struct page **pages;
755 	size_t i, count;
756 	void *p;
757 	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
758 
759 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
760 	if (!sgbuf)
761 		return NULL;
762 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
763 	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
764 	if (!pages)
765 		goto error;
766 	sgbuf->pages = pages;
767 	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
768 	if (!sgbuf->addrs)
769 		goto error;
770 
771 	for (i = 0; i < count; sgbuf->count++, i++) {
772 		p = do_alloc_fallback_pages(dmab->dev.dev, PAGE_SIZE,
773 					    &sgbuf->addrs[i], wc);
774 		if (!p)
775 			goto error;
776 		sgbuf->pages[i] = virt_to_page(p);
777 	}
778 
779 	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
780 	if (!p)
781 		goto error;
782 	dmab->private_data = sgbuf;
783 	return p;
784 
785  error:
786 	__snd_dma_sg_fallback_free(dmab, sgbuf);
787 	return NULL;
788 }
789 
790 static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
791 {
792 	vunmap(dmab->area);
793 	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
794 }
795 
796 static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
797 				    struct vm_area_struct *area)
798 {
799 	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
800 
801 	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
802 		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
803 	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
804 }
805 
806 static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
807 	.alloc = snd_dma_sg_fallback_alloc,
808 	.free = snd_dma_sg_fallback_free,
809 	.mmap = snd_dma_sg_fallback_mmap,
810 	/* reuse vmalloc helpers */
811 	.get_addr = snd_dma_vmalloc_get_addr,
812 	.get_page = snd_dma_vmalloc_get_page,
813 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
814 };
815 #endif /* CONFIG_SND_DMA_SGBUF */
816 
817 /*
818  * Non-coherent pages allocator
819  */
820 static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
821 {
822 	void *p;
823 
824 	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
825 				  dmab->dev.dir, DEFAULT_GFP);
826 	if (p)
827 		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
828 	return p;
829 }
830 
831 static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
832 {
833 	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
834 			     dmab->addr, dmab->dev.dir);
835 }
836 
837 static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
838 				    struct vm_area_struct *area)
839 {
840 	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
841 	return dma_mmap_pages(dmab->dev.dev, area,
842 			      area->vm_end - area->vm_start,
843 			      virt_to_page(dmab->area));
844 }
845 
846 static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
847 				     enum snd_dma_sync_mode mode)
848 {
849 	if (mode == SNDRV_DMA_SYNC_CPU) {
850 		if (dmab->dev.dir != DMA_TO_DEVICE)
851 			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
852 						dmab->bytes, dmab->dev.dir);
853 	} else {
854 		if (dmab->dev.dir != DMA_FROM_DEVICE)
855 			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
856 						   dmab->bytes, dmab->dev.dir);
857 	}
858 }
859 
860 static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
861 	.alloc = snd_dma_noncoherent_alloc,
862 	.free = snd_dma_noncoherent_free,
863 	.mmap = snd_dma_noncoherent_mmap,
864 	.sync = snd_dma_noncoherent_sync,
865 };
866 
867 #endif /* CONFIG_HAS_DMA */
868 
869 /*
870  * Entry points
871  */
872 static const struct snd_malloc_ops *dma_ops[] = {
873 	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
874 	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
875 #ifdef CONFIG_HAS_DMA
876 	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
877 	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
878 	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
879 	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
880 #ifdef CONFIG_SND_DMA_SGBUF
881 	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
882 #endif
883 #ifdef CONFIG_GENERIC_ALLOCATOR
884 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
885 #endif /* CONFIG_GENERIC_ALLOCATOR */
886 #ifdef CONFIG_SND_DMA_SGBUF
887 	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
888 	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
889 #endif
890 #endif /* CONFIG_HAS_DMA */
891 };
892 
893 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
894 {
895 	if (WARN_ON_ONCE(!dmab))
896 		return NULL;
897 	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
898 			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
899 		return NULL;
900 	return dma_ops[dmab->dev.type];
901 }
902