/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device. It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple. The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages. Each page in the page_list is split into blocks of at
 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page. Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
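/*
 * Illustrative note (not from the original source): the free list is
 * embedded in the page itself. The first bytes of each free block store
 * the offset of the next free block, and page->offset holds the head of
 * the chain. For example, with size = 64, a freshly initialised page
 * chains block offsets in ascending order (0 -> 64 -> 128 -> ...), and
 * an offset equal to pool->allocation terminates the list.
 */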
static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
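/*
 * Illustrative sketch (not part of the original file): reading the sysfs
 * attribute above might produce output such as
 *
 *	poolinfo - 0.1
 *	buffer-2048         3    4 2048  2
 *
 * i.e. pool name, blocks in use, total block capacity, block size and
 * page count, per the scnprintf() format string. The pool name and the
 * numbers here are invented for illustration.
 */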
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created. Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory. Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives. The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
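/*
 * Illustrative sketch (not part of the original file): a driver needing
 * many 64-byte descriptors, 16-byte aligned and never crossing a 4 KiB
 * boundary, might set up its pool like this (names hypothetical):
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydev-desc", &pdev->dev, 64, 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 */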
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
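/*
 * Illustrative worked example (not from the original source): with
 * size = 384, boundary = 1024 and allocation = 4096,
 * pool_initialise_page() chains offsets 0 -> 384 -> 1024 -> 1408 ->
 * 2048 -> ... ; the block that would start at offset 768 is skipped
 * because it would end at 1152, crossing the 1024 boundary, so no
 * returned block straddles a 1 KiB line.
 */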
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
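/*
 * Illustrative sketch (not part of the original file): a driver's remove
 * path should return every block before destroying the pool (names
 * hypothetical):
 *
 *	dma_pool_free(pool, desc_cpu, desc_dma);
 *	dma_pool_destroy(pool);
 *
 * Destroying a pool that still has blocks in use triggers the "busy"
 * message above and deliberately leaks the affected pages.
 */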
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}
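/*
 * Illustrative sketch (not part of the original file): a typical
 * allocation keeps both addresses of the block; the CPU writes through
 * the returned virtual address while the device is handed the
 * dma_addr_t (names hypothetical):
 *
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu)
 *		return -ENOMEM;
 */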
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
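/*
 * Illustrative note (not from the original source): the assignments
 * "*(int *)vaddr = page->offset; page->offset = offset;" at the end of
 * dma_pool_free() push the freed block onto the head of the page's free
 * list, so blocks are reused in LIFO order: freeing offsets 0 and then
 * 64 leaves page->offset == 64 with the chain reading 64 -> 0 -> old head.
 */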
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create(). DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);
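/*
 * Illustrative sketch (not part of the original file): a devres-managed
 * pool created in probe() needs no explicit destroy call; it is released
 * automatically when the driver detaches (names hypothetical):
 *
 *	pool = dmam_pool_create("mydev-desc", &pdev->dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */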