#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
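
/*
 * Illustrative note (not part of the original file): reading the "pools"
 * attribute above from sysfs prints one line per pool, following the
 * format string in show_pools().  With a hypothetical pool named
 * "buffer-2048", the output might look like:
 *
 *	poolinfo - 0.1
 *	buffer-2048         1    4 2048  2
 *
 * i.e. pool name, blocks in use, total blocks, block size, and page
 * count.  The sample values here are made up for illustration.
 */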

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0)
		return NULL;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
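
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * creating a pool of 64-byte descriptor blocks, 16-byte aligned, with no
 * extra boundary restriction.  The "xfer" name, the sizes, and "pdev"
 * are hypothetical.
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("xfer", &pdev->dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */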

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
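
/*
 * Worked example (illustrative, not part of the original file): with a
 * 4096-byte allocation and 64-byte blocks, blocks_per_page is 64.  On a
 * 64-bit machine (BITS_PER_LONG == 64), the mapsize computation in
 * pool_alloc_page() rounds that up to a single long, i.e. 8 bytes of
 * bitmap appended after the struct dma_page header:
 *
 *	mapsize = (64 + 64 - 1) / 64  = 1 long
 *	mapsize *= sizeof(long)       = 8 bytes
 *
 * Every bit starts out set, since a set bit marks a free block; that is
 * also why is_page_busy() reports busy on anything other than ~0UL.
 */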

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
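
/*
 * Teardown sketch (illustrative, not part of the original file): every
 * block must go back through dma_pool_free() before the pool itself is
 * destroyed, or dma_pool_destroy() will log an error and leak the busy
 * pages.  The "desc" names below are hypothetical.
 *
 *	dma_pool_free(pool, desc, desc_dma);
 *	dma_pool_destroy(pool);
 */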

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
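
/*
 * Example (illustrative sketch, not part of the original file): allocating
 * one block and keeping both its CPU and bus addresses.  "pool" is assumed
 * to come from dma_pool_create(); "desc" and "desc_dma" are hypothetical
 * names.
 *
 *	dma_addr_t desc_dma;
 *	void *desc;
 *
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *
 * The driver then programs desc_dma into the device and accesses the
 * block itself through desc; no cache flushing is needed.
 */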

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
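
/*
 * Worked example (illustrative, not part of the original file): for a
 * pool of 64-byte blocks, freeing the block at page->dma + 0x240 in
 * dma_pool_free() above gives, on a 64-bit machine:
 *
 *	block = 0x240 / 64          = 9
 *	map   = 9 / BITS_PER_LONG   = 0
 *	block = 9 % BITS_PER_LONG   = 9
 *
 * so bit 9 of bitmap[0] is set again to mark that block free.
 */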

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
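
/*
 * Example (illustrative sketch, not part of the original file): in a probe
 * routine, the managed variant needs no matching destroy call on the error
 * or detach paths, since devres tears the pool down automatically.  The
 * "rx-desc" name, the sizes, and "pdev" are hypothetical.
 *
 *	pool = dmam_pool_create("rx-desc", &pdev->dev, 128, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */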

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
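
/*
 * Note (illustrative, not part of the original file): most users of
 * dmam_pool_create() never call dmam_pool_destroy() themselves; the
 * dmam_pool_release() callback runs automatically on driver detach.
 * Explicit destruction is only needed for pools torn down earlier.
 */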