xref: /openbmc/linux/mm/dmapool.c (revision da9619a30e73b59605ed998bf7bc4359f5c0029a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in a single unsorted
 * singly-linked list that spans all pages.  Used blocks aren't tracked,
 * but we keep a count of how many are currently allocated from the pool.
 */
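
/*
 * Typical use, as an illustrative sketch only (the device pointer, names
 * and sizes below are hypothetical, not taken from any real driver):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cmd;
 *
 *	pool = dma_pool_create("cmds", &pdev->dev, 64, 8, 0);
 *	cmd = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (cmd) {
 *		// hand 'dma' to the device, touch 'cmd' from the CPU
 *		dma_pool_free(pool, cmd, dma);
 *	}
 *	dma_pool_destroy(pool);
 */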

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

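/*
 * A free block's first bytes hold the free-list linkage itself: the
 * pool-wide next pointer and the block's own DMA address, cached so that
 * allocation does not have to recompute it.
 */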
struct dma_block {
	struct dma_block *next_block;
	dma_addr_t dma;
};

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct dma_block *next_block;
	size_t nr_blocks;
	size_t nr_active;
	size_t nr_pages;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

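/*
 * The "pools" device attribute reports one line per pool.  Given the
 * format below, a line might look like (values illustrative only):
 *
 *	cmds               12   64   64  1
 *
 * i.e. name, blocks in use, total blocks, block size, and page count.
 */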
static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_pool *pool;
	unsigned int size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

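/*
 * With DMAPOOL_DEBUG, free blocks are filled with POOL_POISON_FREED and
 * allocated blocks with POOL_POISON_ALLOCATED, so the helpers below can
 * catch writes to freed blocks, double frees, and bogus DMA addresses.
 */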
#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}

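/* Find the page whose [dma, dma + allocation) range covers @dma. */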
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

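/*
 * Free blocks form a pool-wide LIFO stack headed by pool->next_block;
 * pop and push are O(1) and are called with pool->lock held.
 */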
static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}

static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate
 * memory.  Such memory will all have "consistent" DMA mappings,
 * accessible by the device and its driver without using cache flushing
 * primitives.  The actual size of blocks allocated may be larger than
 * requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
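 *
 * For example (a hypothetical device, not taken from any driver), a pool
 * of 512-byte blocks, 32-byte aligned, none crossing a 4KB boundary:
 *
 *	pool = dma_pool_create("rings", &pdev->dev, 512, 32, 4096);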
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is no race between
	 * dma_pool_create() and dma_pool_destroy(), or between two calls to
	 * dma_pool_create() when the first fails in device_create_file()
	 * while the second assumes the file has already been created
	 * (admittedly a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

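/*
 * Carve 'allocation' bytes at page->vaddr into blocks and splice them
 * onto the pool's free list.  For example (illustrative numbers only),
 * with size 96, boundary 1024 and allocation 4096, blocks start at
 * offsets 0, 96, ..., 864; a block at offset 960 would end at 1056,
 * crossing the 1024 boundary, so the next block starts at 1024 instead.
 */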
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block; the
 * block's dma address is reported through @handle.
 * If such a memory block can't be allocated, %NULL is returned.
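 *
 * An illustrative call ('pool' from dma_pool_create(), 'dma_handle' a
 * caller-local dma_addr_t):
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);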
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
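 *
 * An illustrative call, returning the block from the dma_pool_alloc()
 * sketch above:
 *
 *	dma_pool_free(pool, cpu_addr, dma_handle);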
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
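 *
 * An illustrative probe-time sketch (device and sizes hypothetical); no
 * matching destroy is needed in the error or remove paths:
 *
 *	pool = dmam_pool_create("cmds", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;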
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
526141e9d4bSMatthew Wilcox EXPORT_SYMBOL(dmam_pool_destroy);
527