/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
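
/*
 * Illustrative sketch of typical driver-side usage (the device pointer,
 * sizes and error handling here are hypothetical, not part of this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	pool = dma_pool_create("frag", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (cpu) {
 *		(program the device with 'dma', access the block via 'cpu')
 *		dma_pool_free(pool, cpu, dma);
 *	}
 *	dma_pool_destroy(pool);
 */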

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};
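
/*
 * Worked example of the free-list encoding above (values illustrative):
 * with pool->size == 64 and pool->allocation == 4096, a fresh page
 * chains offsets 0 -> 64 -> 128 -> ... in ascending order; each free
 * block's first int holds the offset of the next free block, and
 * page->offset is the head of the chain.  An offset >= pool->allocation
 * (4096 here) is the end-of-chain sentinel: the page has no free blocks.
 */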

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned int temp;
	unsigned int size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned int pages = 0;
		unsigned int blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
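
/*
 * Reading the resulting /sys/devices/.../pools attribute then looks
 * roughly like this (sample values, derived from the scnprintf()
 * formats above):
 *
 *	poolinfo - 0.1
 *	buffer-512         12   16  512  2
 *	buffer-2048         1    2 2048  1
 *
 * i.e. pool name, blocks in use, total block capacity, block size and
 * page count for each pool on the device.
 */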

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for DMA.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4 KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

/*
 * Chain the blocks of a freshly allocated page into its free list,
 * skipping any block that would cross a 'boundary'.
 */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
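
/*
 * Worked example of the chain built above (values illustrative): with
 * pool->size == 768, pool->boundary == 2048 and pool->allocation == 4096,
 * the chain becomes 0 -> 768 -> 2048 -> 2816 -> 4096 (end sentinel).
 * The block that would start at offset 1536 is skipped, because it would
 * cross the 2048-byte boundary at 1536 + 768 = 2304.
 */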

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
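
/*
 * Note on calling context (the caller shown is hypothetical): without
 * __GFP_WAIT in @mem_flags the function above never sleeps, so it is
 * usable from interrupt handlers:
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;	(no block was free, and a new page
 *				 could not be allocated atomically)
 *
 * With __GFP_WAIT the caller instead sleeps on pool->waitq in
 * POOL_TIMEOUT_JIFFIES slices, retrying until dma_pool_free() or a
 * successful page allocation provides a block.
 */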

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
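
/*
 * Worked example of the LIFO free chain above (values illustrative):
 * if page->offset was 96 and a caller frees the block at offset 64,
 * then *(int *)vaddr becomes 96 and page->offset becomes 64, so the
 * next dma_pool_alloc() on this page hands out offset 64 before 96.
 */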

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
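
/*
 * Illustrative sketch of managed usage (the probe function, device and
 * type names here are hypothetical): no explicit cleanup is needed on
 * error paths or in the remove() callback, since devres destroys the
 * pool on driver detach:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *foo = ...;
 *
 *		foo->desc_pool = dmam_pool_create("foo-desc", &pdev->dev,
 *						  sizeof(struct foo_desc),
 *						  8, 0);
 *		if (!foo->desc_pool)
 *			return -ENOMEM;
 *		...
 *	}
 */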

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);
508