/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.
 */
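
/*
 * Usage sketch (illustrative only; the device, pool name and sizes here
 * are hypothetical, not taken from this file).  A driver creates one
 * pool per block size, then allocates and frees blocks from it:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	pool = dma_pool_create("descriptors", dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (buf) {
 *		... hand 'dma' to the device, touch 'buf' from the CPU ...
 *		dma_pool_free(pool, buf, dma);
 *	}
 *	dma_pool_destroy(pool);
 */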

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

struct dma_pool {		/* the pool */
	struct list_head page_list;	/* all pages backing this pool */
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;			/* block size */
	struct device *dev;
	size_t allocation;		/* bytes per dma_alloc_coherent() */
	char name[32];
	wait_queue_head_t waitq;	/* blockers in dma_pool_alloc() */
	struct list_head pools;		/* link in dev->dma_pools */
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;	/* link in pool->page_list */
	void *vaddr;			/* kernel virtual address */
	dma_addr_t dma;			/* bus address of this page */
	unsigned in_use;		/* blocks currently allocated */
	unsigned long bitmap[0];	/* set bit == free block */
};
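
/*
 * The dma_page header is followed, in the same kmalloc'd object, by a
 * bitmap holding one bit per block; a set bit means the block is free.
 * For example (hypothetical numbers), a pool with 170 blocks per page
 * needs 170 bits, rounded up to 3 longs on a 64-bit machine; see
 * pool_alloc_page() for the exact computation.
 */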

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name,
				 blocks, pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

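/*
 * Sample of what reading the "pools" attribute might print (the pool
 * names and counts below are made up for illustration, but follow the
 * scnprintf() format above):
 *
 *	poolinfo - 0.1
 *	buffer-2048         3    4 2048  2
 *	buffer-512          3    8  512  1
 *
 * i.e. name, blocks in use, total blocks, block size, pages.
 */
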
static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0)
		return NULL;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
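
/*
 * Worked example of the sizing above (numbers are hypothetical): for
 * size = 24, align = 8 and allocation = 0, size stays 24 (already a
 * multiple of 8), allocation defaults to PAGE_SIZE (4096 on most
 * architectures), and blocks_per_page becomes 4096 / 24 = 170, leaving
 * 16 bytes of each page unused.
 */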

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev,
					 pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}
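
/*
 * Sketch of the bitmap sizing in pool_alloc_page() (hypothetical
 * numbers): with blocks_per_page = 170 and BITS_PER_LONG = 64, mapsize
 * rounds up to 3 longs (192 bits), so 24 bytes of bitmap are appended
 * to the struct dma_page header in a single kmalloc() object.
 */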

static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cacheable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
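
/*
 * Example of the offset arithmetic in dma_pool_alloc() (hypothetical
 * numbers): if the first free bit found is bit 3 of bitmap word 1 and
 * BITS_PER_LONG is 64, the block index is 64 * 1 + 3 = 67; with
 * pool->size = 24 that yields byte offset 67 * 24 = 1608, so the caller
 * gets page->vaddr + 1608 and *handle = page->dma + 1608.
 */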

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
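
/*
 * Worked example of the index math in dma_pool_free() (hypothetical
 * numbers, the inverse of the dma_pool_alloc() example above): with
 * pool->size = 24 and dma - page->dma = 1608, block = 1608 / 24 = 67;
 * with BITS_PER_LONG = 64 that is bit 67 % 64 = 3 of bitmap word
 * 67 / 64 = 1, which set_bit() marks free again.
 */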

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
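
/*
 * Usage sketch (hypothetical driver code): with the managed variant the
 * pool is tied to the device's devres list, so a probe() routine can do
 *
 *	pool = dmam_pool_create("descriptors", &pdev->dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * and omit the explicit dma_pool_destroy() on the remove and error
 * paths; the pool is destroyed automatically on driver detach.
 */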

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);