// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks across all pages.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from the pool.
 */
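
/*
 * A minimal usage sketch of the API implemented below (illustrative only;
 * "mydev" and the sizes are made-up placeholders, not part of this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	pool = dma_pool_create("frames", mydev, 64, 8, 0);
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	...			// hand 'dma' to the device, touch 'buf' from the CPU
 *	dma_pool_free(pool, buf, dma);
 *	dma_pool_destroy(pool);
 */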

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_block {
	struct dma_block *next_block;
	dma_addr_t dma;
};

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct dma_block *next_block;
	size_t nr_blocks;
	size_t nr_active;
	size_t nr_pages;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_pool *pool;
	unsigned int size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);
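
/*
 * Illustrative output of reading the "pools" attribute above (the pool
 * names and numbers here are invented, not taken from a real system):
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   64 2048  4
 *	buffer-512          3   56  512  1
 *
 * The columns are: pool name, blocks in use, total blocks, block size
 * in bytes, and pages backing the pool.
 */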

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}

static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate
 * memory.  Such memory will all have "consistent" DMA mappings,
 * accessible by the device and its driver without using cache flushing
 * primitives.  The actual size of blocks allocated may be larger than
 * requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	empty = list_empty(&dev->dma_pools);
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
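
/*
 * Parameter sketch (hypothetical device "dev", values invented): a pool
 * of 96-byte blocks, 32-byte aligned, where no block may cross a 4 KiB
 * boundary.  Internally size becomes ALIGN(96, 32) = 96 and allocation
 * is rounded up to PAGE_SIZE:
 *
 *	pool = dma_pool_create("ring-desc", dev, 96, 32, 4096);
 */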

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}
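
/*
 * Worked example of the carving above (numbers chosen for illustration):
 * with size = 96, boundary = 1024 and allocation = 4096, each 1024-byte
 * window holds 1024 / 96 = 10 blocks, and the 64 bytes left at the end
 * of the window are skipped so no block crosses the boundary.  The page
 * therefore contributes 4 * 10 = 40 blocks to the free list.
 */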

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	empty = list_empty(&pool->dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
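
/*
 * Allocation/free sketch ("pool" as created earlier; the GFP choice
 * depends on the caller's context).  dma_pool_alloc() may be called with
 * GFP_ATOMIC from non-sleeping contexts, and a block must be released
 * with the same (vaddr, dma) pair it was handed out with:
 *
 *	dma_addr_t dma;
 *	void *vaddr = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *
 *	if (vaddr) {
 *		...	// program the device with 'dma'
 *		dma_pool_free(pool, vaddr, dma);
 *	}
 */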

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
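
/*
 * Managed-lifetime sketch (hypothetical probe routine "mydrv_probe"; a
 * real driver would use its bus-specific probe signature).  The pool is
 * torn down automatically by devres when the device is unbound, so the
 * error and remove paths need no explicit dma_pool_destroy():
 *
 *	static int mydrv_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("mydrv-buf", dev, 256, 16, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *	}
 */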

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);