// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
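/*
 * Illustrative sketch (not part of the implementation below): how a driver
 * typically uses this API.  The device pointer "dev" and the sizes are
 * hypothetical.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("descriptors", dev, 64, 64, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	// ... program the device with the bus address "dma" ...
 *	dma_pool_free(pool, vaddr, dma);
 *	dma_pool_destroy(pool);		// every block must be freed first
 */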

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int size;
	struct dma_page *page;
	struct dma_pool *pool;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		size_t blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
				      pool->name, blocks,
				      (size_t) pages *
				      (pool->allocation / pool->size),
				      pool->size, pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);
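/*
 * Hypothetical example of what reading the resulting sysfs attribute
 * (/sys/devices/.../pools) might emit, given the format string above;
 * the pool names and counts here are made up:
 *
 *	poolinfo - 0.1
 *	buffer-2048          4   32 2048  2
 *	buffer-512          64  128  512  1
 *
 * Columns: pool name, blocks in use, total block capacity across all
 * pages, block size in bytes, and number of pages.
 */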

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, void *retval,
			     unsigned int offset, gfp_t mem_flags)
{
	int i;
	u8 *data = retval;
	/* page->offset is stored in first 4 bytes */
	for (i = sizeof(offset); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n",
			__func__, pool->name, retval);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
			       data, pool->size, 1);
		break;
	}
	if (!want_init_on_alloc(mem_flags))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
}

static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
			  void *vaddr, dma_addr_t dma)
{
	unsigned int offset = vaddr - page->vaddr;
	unsigned int chain = page->offset;

	if ((dma - page->dma) != offset) {
		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (chain < pool->allocation) {
		if (chain != offset) {
			chain = *(int *)(page->vaddr + chain);
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, void *retval,
			     unsigned int offset, gfp_t mem_flags)
{
}

static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
			  void *vaddr, dma_addr_t dma)
{
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif
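/*
 * Sketch of the invariant the debug helpers above rely on: in a free
 * block, the first sizeof(int) bytes hold the offset of the next free
 * block, and every remaining byte is POOL_POISON_FREED (a distinctive
 * byte pattern from <linux/poison.h>); a live block is instead filled
 * with POOL_POISON_ALLOCATED.  pool_check_block() scans a block as it
 * is handed out: any non-poison byte past the offset field means
 * something wrote to the block while it sat on the free list.
 * pool_page_err() walks the offset chain to catch double frees.
 */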

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
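/*
 * Illustrative sketch: a pool for a device whose DMA engine cannot let
 * a single transfer cross a 4KB address boundary.  The device pointer
 * and sizes are hypothetical.
 *
 *	pool = dma_pool_create("tx-ring", dev, 512, 512, 4096);
 *
 * Every block returned by dma_pool_alloc() from this pool is 512 bytes,
 * 512-byte aligned, and guaranteed not to straddle a 4KB boundary.
 */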

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	pool_init_page(pool, page);
	page->in_use = 0;
	page->offset = 0;
	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
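/*
 * Worked example of the chain built above, assuming size = 96,
 * boundary = 256 and allocation = 4096: offsets advance by 96
 * (0 -> 96), but since a block at 192 would run past the 256 boundary
 * (192 + 96 = 288 >= 256), the slot at 96 links straight to 256,
 * skipping the 64-byte tail that cannot hold a whole block.  The chain
 * is thus 0 -> 96 -> 256 -> 352 -> 512 -> ..., and the last in-page
 * slot stores a value >= allocation, which dma_pool_alloc() below
 * treats as "page full".
 */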

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	pool_initialise_page(pool, page);
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

	pool_init_page(pool, page);
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			dev_err(pool->dev, "%s %s, %p busy\n", __func__,
				pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	unsigned int offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
	pool_check_block(pool, retval, offset, mem_flags);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
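/*
 * Illustrative sketch: because the pool lock is taken with irqsave and
 * pool_alloc_page() only sleeps when @mem_flags allows it, a block can
 * typically be allocated from interrupt context by passing GFP_ATOMIC.
 * The names here are hypothetical:
 *
 *	dma_addr_t dma;
 *	void *desc = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *
 *	if (!desc)
 *		return -ENOMEM;	// no free block, no page could be added
 */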

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}

	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	if (pool_page_err(pool, page, vaddr, dma)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return;
	}

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = vaddr - page->vaddr;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
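/*
 * Sketch of the bookkeeping above: freeing is a LIFO push onto the
 * page's free list.  If, hypothetically, the page's first free block
 * was at offset 256 and a block at offset 64 is freed, then afterwards
 * page->offset == 64 and the int at the start of the freed block holds
 * 256, so the block at offset 64 is handed out first by the next
 * dma_pool_alloc().
 */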

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
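/*
 * Illustrative sketch: a driver probe routine using the managed variant,
 * so the pool is destroyed automatically on driver detach.  All names
 * ("my_probe", "my_priv", the sizes) are hypothetical:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_priv *priv = ...;
 *
 *		priv->pool = dmam_pool_create("my-bufs", &pdev->dev,
 *					      128, 128, 0);
 *		if (!priv->pool)
 *			return -ENOMEM;
 *		return 0;	// no matching dma_pool_destroy() needed
 *	}
 */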