// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
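/*
 * To make the free-list scheme above concrete, here is a minimal sketch of
 * the in-page layout (illustrative values only, not taken from any specific
 * caller): with size = 64 and allocation = PAGE_SIZE = 4096, the first four
 * bytes of every free block store the offset of the next free block, so a
 * freshly initialised page chains 0 -> 64 -> 128 -> ... -> 4032 -> 4096,
 * where reaching 'allocation' terminates the list.  dma_pool_alloc() pops
 * the head of this list; dma_pool_free() pushes the block back, so the
 * list stays unsorted (LIFO).
 */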
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
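/*
 * A minimal usage sketch for dma_pool_create(); the device pointer and the
 * descriptor geometry below are hypothetical, for illustration only.  A
 * driver with 64-byte hardware descriptors that must not cross a 4 KiB
 * boundary might do, typically from its probe() routine:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("foo-desc", &pdev->dev, 64, 8, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 */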
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
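/*
 * Worked example of the boundary skip above (numbers chosen for
 * illustration, not taken from any real caller): with size = 1536,
 * boundary = 4096 and allocation = 4096, the first pass links 0 -> 1536;
 * on the second pass next would be 3072, but 3072 + 1536 >= 4096, so the
 * block that would straddle the boundary is skipped and the chain becomes
 * 1536 -> 4096, which terminates the loop.  The page therefore yields two
 * usable blocks, each wholly inside a 4 KiB window.
 */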
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
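/*
 * A minimal sketch of allocating from and returning to a pool (names are
 * hypothetical and error handling is trimmed for brevity).  The dma_addr_t
 * handle is what the driver programs into the hardware, while the returned
 * virtual address is what the CPU uses:
 *
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	... hand 'dma' to the device, fill in the block through 'desc' ...
 *	dma_pool_free(pool, desc, dma);
 */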
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
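/*
 * To illustrate the free path above with concrete (hypothetical) numbers:
 * if a page's free list currently heads at offset 128 and a block at
 * offset 64 is freed, dma_pool_free() stores 128 into the first four bytes
 * of the freed block and sets page->offset = 64, so the list becomes
 * 64 -> 128 -> ... and the next dma_pool_alloc() hands offset 64 back out.
 */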
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
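/*
 * A minimal sketch of the managed variant (device and sizes hypothetical,
 * for illustration only).  Because the pool is registered as a devres
 * resource, the driver needs no explicit dma_pool_destroy() call on its
 * remove path; driver detach cleans up automatically:
 *
 *	pool = dmam_pool_create("foo-desc", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */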
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);