// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
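/*
 * Illustrative usage (a minimal sketch; the device, block size, and names
 * below are hypothetical, not taken from this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	pool = dma_pool_create("descriptors", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (buf) {
 *		... program the device with the 'dma' bus address ...
 *		dma_pool_free(pool, buf, dma);
 *	}
 *	dma_pool_destroy(pool);
 */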

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}
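/*
 * Reading the "pools" attribute thus yields one line per pool following
 * the scnprintf() format above, e.g. (values invented for illustration):
 *
 *	poolinfo - 0.1
 *	buffer-2048         7  128 2048 64
 *
 * i.e. pool name, blocks in use, total blocks, block size, page count.
 */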

static DEVICE_ATTR(pools, 0444, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
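/*
 * For example (a hypothetical pool; the parameters are illustrative only),
 * a host controller whose 32-byte transfer descriptors must not straddle a
 * 4 KiB page could use:
 *
 *	pool = dma_pool_create("td", dev, 32, 32, 4096);
 *
 * Every block handed out by dma_pool_alloc() then sits entirely within one
 * 4096-byte region.
 */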

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
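/*
 * Worked example of the chain built above (numbers are illustrative): with
 * size = 64, boundary = 256 and allocation = 4096, the stored chain is
 * 0 -> 64 -> 128 -> 256 -> 320 -> 384 -> 512 -> ...  Note that the '>='
 * test also skips the final 64-byte slot before each boundary, so each
 * 256-byte stripe yields three usable blocks here.  Each free block's
 * first bytes hold the offset of the next free block, and the chain
 * terminates once the stored offset reaches 'allocation'.
 */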

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				pr_err("dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
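/*
 * pool_alloc_page() inherits the caller's gfp mask, so a non-blocking
 * mask such as GFP_ATOMIC keeps the whole call usable from atomic
 * context (the might_sleep_if() above encodes that contract).  A minimal
 * sketch, with a hypothetical caller:
 *
 *	dma_addr_t dma;
 *	void *buf = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *
 *	if (!buf)
 *		... no free block, and a new page could not be allocated ...
 */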

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
				pool->name, vaddr, &dma);
		else
			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
			       pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
					pool->name, &dma);
			else
				pr_err("dma_pool_free %s, dma %pad already free\n",
				       pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
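/*
 * Typical use (a hypothetical probe routine, shown for illustration):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *priv = ...;
 *
 *		priv->pool = dmam_pool_create("foo-desc", &pdev->dev,
 *					      sizeof(struct foo_desc), 8, 0);
 *		if (!priv->pool)
 *			return -ENOMEM;
 *		...
 *	}
 *
 * No matching destroy call is needed; devres tears the pool down when the
 * driver detaches.
 */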

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);