// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *do_alloc_fallback_pages(struct device *dev, size_t size,
				     dma_addr_t *addr, bool wc);
static void do_free_fallback_pages(void *p, size_t size, bool wc);
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
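
/*
 * Usage sketch (hypothetical caller code, not part of this file): for the
 * CONTINUOUS and VMALLOC buffer types the "device" argument may carry gfp
 * flags encoded as a fake pointer, which snd_mem_get_gfp_flags() above
 * decodes; passing NULL keeps the default gfp flags.
 *
 *	struct snd_dma_buffer buf;
 *	struct device *gfp_dev =
 *		(struct device *)(__force unsigned long)(GFP_KERNEL | __GFP_NOWARN);
 *	int err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS, gfp_dev,
 *				      8192, &buf);
 *	if (!err)
 *		snd_dma_free_pages(&buf);
 */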

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
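
/*
 * Usage sketch (hypothetical PCI driver code, a sketch only): allocate a
 * 64 kB device-coherent playback buffer and release it again.  "pci" is an
 * assumed, already-enabled PCI device.
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
 *				      DMA_TO_DEVICE, 64 * 1024, &dmab);
 *	if (err < 0)
 *		return err;
 *	// program the hardware with dmab.addr, touch it from the CPU
 *	// via dmab.area, then release:
 *	snd_dma_free_pages(&dmab);
 */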

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
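
/*
 * Usage sketch (hypothetical, a sketch only): request a large buffer but
 * accept whatever smaller size the fallback loop above settles on; the
 * granted size must be read back from dmab.bytes.
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (!snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *					  1024 * 1024, &dmab))
 *		dev_info(dev, "got %zu bytes\n", dmab.bytes);
 */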

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically when the device is removed.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
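
/*
 * Usage sketch (hypothetical probe code, a sketch only): a devres-managed
 * buffer needs no explicit free; it is released when the device is unbound.
 * "pdev" is an assumed platform device.
 *
 *	struct snd_dma_buffer *dmab;
 *
 *	dmab = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
 *					DMA_BIDIRECTIONAL, 32 * 1024);
 *	if (!dmab)
 *		return -ENOMEM;
 *	// no snd_dma_free_pages() call is needed on the teardown path
 */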

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
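
/*
 * Usage sketch (hypothetical PCM mmap callback, a sketch only, assuming the
 * snd_pcm_get_dma_buf() helper from <sound/pcm.h>): a driver with a
 * non-standard buffer can simply forward the VMA to the helper above.
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *vma)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), vma);
 *	}
 */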

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
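
/*
 * Usage sketch (hypothetical, a sketch only): PCM drivers normally reach
 * this through the PCM-level sync helper, but a direct call would pair the
 * two modes around the data transfer on a non-coherent buffer:
 *
 *	// CPU wrote new samples, device is about to read them:
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 *	// device wrote captured samples, CPU is about to read them:
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 */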

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size of contiguous pages
 *	on an SG buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
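
/*
 * Usage sketch (hypothetical DMA-descriptor setup, a sketch only): walk an
 * SG buffer in hardware-addressable chunks using the two helpers above.
 *
 *	unsigned int ofs = 0, rest = dmab->bytes;
 *
 *	while (rest) {
 *		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);
 *
 *		// queue a descriptor covering (addr, chunk) here
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 */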

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(size_t size, dma_addr_t *addr, gfp_t gfp)
{
	void *p = alloc_pages_exact(size, gfp);

	if (p)
		*addr = page_to_phys(virt_to_page(p));
	return p;
}

static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(size, &dmab->addr,
			      snd_mem_get_gfp_flags(dmab, GFP_KERNEL));
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on contiguous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool to the private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might be limited in size and have no space left,
	 * so if the allocation fails, fall back to the usual page allocation.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_fallback_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	do_free_fallback_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif /* CONFIG_SND_DMA_SGBUF */

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt) {
#ifdef CONFIG_SND_DMA_SGBUF
		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
			dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
		else
			dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
		return snd_dma_sg_fallback_alloc(dmab, size);
#else
		return NULL;
#endif
	}

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on contiguous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	if (!p)
		return NULL;
	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
		return p;
	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wc(sg_wc_address(&iter), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wb(sg_wc_address(&iter), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* manual page allocations with wc setup */
static void *do_alloc_fallback_pages(struct device *dev, size_t size,
				     dma_addr_t *addr, bool wc)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	void *p;

 again:
	p = do_alloc_pages(size, addr, gfp);
	if (!p || (*addr + size - 1) & ~dev->coherent_dma_mask) {
		/* retry with a lower zone when the allocation failed or the
		 * address doesn't fit the coherent DMA mask of the device;
		 * drop an allocation that landed too high before retrying
		 */
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			if (p)
				free_pages_exact(p, size);
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			if (p)
				free_pages_exact(p, size);
			goto again;
		}
	}
	if (p && wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
	return p;
}

static void do_free_fallback_pages(void *p, size_t size, bool wc)
{
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
	free_pages_exact(p, size);
}

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	size_t count;
	struct page **pages;
	dma_addr_t *addrs;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
	size_t i;

	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
		do_free_fallback_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pages;
	size_t i, count;
	void *p;
	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto error;
	sgbuf->pages = pages;
	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->addrs)
		goto error;

	for (i = 0; i < count; sgbuf->count++, i++) {
		p = do_alloc_fallback_pages(dmab->dev.dev, PAGE_SIZE,
					    &sgbuf->addrs[i], wc);
		if (!p)
			goto error;
		sgbuf->pages[i] = virt_to_page(p);
	}

	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;
	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
	.alloc = snd_dma_sg_fallback_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	/* reuse vmalloc helpers */
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}
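
/*
 * Extension sketch (hypothetical, a sketch only): a new buffer type would be
 * wired up by implementing struct snd_malloc_ops and adding an entry to the
 * dma_ops[] table above, indexed by its SNDRV_DMA_TYPE_* value.  Only .alloc
 * and .free are strictly required; the other callbacks are optional and the
 * snd_* wrappers above either provide defaults or return -ENOENT.  The type
 * name and callbacks below are made-up placeholders.
 *
 *	static const struct snd_malloc_ops my_ops = {
 *		.alloc = my_alloc,
 *		.free  = my_free,
 *	};
 *	// in dma_ops[]:  [SNDRV_DMA_TYPE_MY_NEW_TYPE] = &my_ops,
 */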
907