/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <sound/memalloc.h>

/*
 *
 *  Generic memory allocators
 *
 */

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages covering the given size.
 *
 * Return: The pointer to the allocated buffer, or %NULL if there is not
 * enough memory.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
	int pg;

	if (WARN_ON(!size))
		return NULL;
	if (WARN_ON(!gfp_flags))
		return NULL;
	gfp_flags |= __GFP_COMP;	/* compound page lets parts be mapped */
	pg = get_order(size);
	return (void *) __get_free_pages(gfp_flags, pg);
}
EXPORT_SYMBOL(snd_malloc_pages);
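
/*
 * Usage sketch (illustrative only, kept under #if 0 so it is never
 * compiled): a hypothetical caller allocating and releasing a physically
 * contiguous scratch buffer.  The function name is made up for the example.
 */
#if 0
static int example_use_pages(void)
{
	size_t bytes = 2 * PAGE_SIZE;
	void *buf;

	/* may sleep; the size is rounded up to a power-of-two page order */
	buf = snd_malloc_pages(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... use the buffer ... */

	/* pass the same size back so the page order matches */
	snd_free_pages(buf, bytes);
	return 0;
}
#endif /* example */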
/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	free_pages((unsigned long) ptr, pg);
}
EXPORT_SYMBOL(snd_free_pages);

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
	int pg;
	gfp_t gfp_flags;

	if (WARN_ON(!dma))
		return NULL;
	pg = get_order(size);
	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	return dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
			       dma_addr_t dma)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}

#ifdef CONFIG_GENERIC_ALLOCATOR
/**
 * snd_malloc_dev_iram - allocate memory from on-chip internal RAM
 * @dmab: buffer allocation record to store the allocated data
 * @size: number of bytes to allocate from the IRAM
 *
 * This function requires an "iram" phandle provided via the of_node of
 * the device.
 */
static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool = NULL;

	dmab->area = NULL;
	dmab->addr = 0;

	if (dev->of_node)
		pool = of_gen_pool_get(dev->of_node, "iram", 0);

	if (!pool)
		return;

	/* Assign the pool into private_data field */
	dmab->private_data = pool;

	dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr);
}

/**
 * snd_free_dev_iram - release memory allocated from on-chip internal RAM
 * @dmab: buffer allocation record holding the data to release
 */
static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
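
/*
 * Illustrative sketch (kept under #if 0, never compiled) of the genalloc
 * pattern that snd_malloc_dev_iram()/snd_free_dev_iram() rely on.  The
 * function name and "my_dev" are hypothetical; the device tree node is
 * assumed to carry an "iram" phandle pointing at an on-chip SRAM pool.
 */
#if 0
static int example_iram_use(struct device *my_dev)
{
	struct gen_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* look up the SRAM pool referenced by the "iram" property */
	pool = of_gen_pool_get(my_dev->of_node, "iram", 0);
	if (!pool)
		return -ENODEV;

	/* carve one page out of the pool; dma receives the bus address */
	vaddr = gen_pool_dma_alloc(pool, PAGE_SIZE, &dma);
	if (!vaddr)
		return -ENOMEM;

	/* ... program the hardware with "dma", access via "vaddr" ... */

	gen_pool_free(pool, (unsigned long)vaddr, PAGE_SIZE);
	return 0;
}
#endif /* example */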
/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		/* for continuous pages, the "device" pointer actually carries
		 * the GFP flags (see snd_dma_continuous_data())
		 */
		dmab->area = snd_malloc_pages(size,
					(__force gfp_t)(unsigned long)device);
		dmab->addr = 0;
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_malloc_dev_iram(dmab, size);
		if (dmab->area)
			break;
		/* Internal memory might have limited size and not enough
		 * space, so if the allocation fails, fall back to allocating
		 * ordinary device memory below.
		 */
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
		/* fall through */
#endif /* CONFIG_GENERIC_ALLOCATOR */
	case SNDRV_DMA_TYPE_DEV:
		dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
#endif
	default:
		pr_err("snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);


/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		snd_free_pages(dmab->area, dmab->bytes);
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_free_dev_iram(dmab);
		break;
#endif /* CONFIG_GENERIC_ALLOCATOR */
	case SNDRV_DMA_TYPE_DEV:
		snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area,
				   dmab->addr);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_free_sgbuf_pages(dmab);
		break;
#endif
	default:
		pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}
EXPORT_SYMBOL(snd_dma_free_pages);
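
/*
 * Usage sketch (illustrative only, kept under #if 0 so it is never
 * compiled): a hypothetical driver allocating a DMA buffer through the
 * generic interface and releasing it again.  The function name and the
 * 64 KiB size are made up for the example.
 */
#if 0
static int example_alloc_dma_buffer(struct device *dev,
				    struct snd_dma_buffer *dmab)
{
	int err;

	/* coherent device memory; retries with smaller sizes on -ENOMEM */
	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
					   64 * 1024, dmab);
	if (err < 0)
		return err;

	/* dmab->area is the CPU address, dmab->addr the DMA address, and
	 * dmab->bytes the size actually obtained (possibly less than asked)
	 */

	snd_dma_free_pages(dmab);
	return 0;
}
#endif /* example */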