// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* obtain the gfp flags cast into the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return -EINVAL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	int err;

	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = NULL;
	err = __snd_dma_alloc_pages(dmab, size);
	if (err < 0)
		return err;
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);

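/*
 * Example usage (an illustrative sketch only, not compiled as part of this
 * file): a typical caller allocates a device buffer once and releases it
 * when done.  The device pointer "dev" and the 64 KiB size are assumed
 * values for illustration.  Note that for SNDRV_DMA_TYPE_CONTINUOUS and
 * SNDRV_DMA_TYPE_VMALLOC the device argument instead carries gfp flags cast
 * to a pointer, as decoded by snd_mem_get_gfp_flags() above.
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 64 * 1024, &dmab);
 *	if (err < 0)
 *		return err;
 *	(dmab.area is the CPU address, dmab.addr the DMA address, and
 *	 dmab.bytes the page-aligned size actually allocated)
 *	snd_dma_free_pages(&dmab);
 */
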
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer previously allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure.
 */
struct snd_dma_buffer *
snd_devm_alloc_pages(struct device *dev, int type, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_pages(type, dev, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);

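/*
 * Example usage (an illustrative sketch only, not compiled as part of this
 * file): a driver that wants the buffer lifetime tied to its device can use
 * the devres-managed variant from its probe path; no explicit free is needed
 * since devres releases the pages at device removal.  "dev" is an assumed
 * device pointer:
 *
 *	struct snd_dma_buffer *dmab;
 *
 *	dmab = snd_devm_alloc_pages(dev, SNDRV_DMA_TYPE_DEV, 32 * 1024);
 *	if (!dmab)
 *		return -ENOMEM;
 *	(use dmab->area / dmab->addr just like with snd_dma_alloc_pages())
 */
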
/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);

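/*
 * Example (an illustrative sketch only, not compiled as part of this file):
 * the helpers above let a caller walk a possibly non-contiguous buffer in
 * physically contiguous chunks, e.g. for programming DMA descriptors.
 * "dmab" and "bytes" are assumed to describe an already allocated buffer:
 *
 *	unsigned int ofs = 0;
 *
 *	while (bytes > 0) {
 *		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);
 *		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, bytes);
 *
 *		(program one descriptor covering [addr, addr + chunk))
 *		ofs += chunk;
 *		bytes -= chunk;
 *	}
 */
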
/*
 * Continuous pages allocator
 */
static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);

	dmab->area = alloc_pages_exact(size, gfp);
	return 0;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	dmab->area = __vmalloc(size, gfp);
	return 0;
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
		offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	ofs %= PAGE_SIZE;
	size += ofs;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;
	return size - ofs;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
						      PAGE_SIZE);
		if (dmab->area)
			return 0;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if the pool allocation fails, fall back to the normal
	 * device page allocation.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp_flags;

	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
					gfp_flags);
#ifdef CONFIG_X86
	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wc((unsigned long)dmab->area,
			      PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return 0;
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};
#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
	[SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
#endif
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}