// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
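
/*
 * To make the free-list encoding above concrete, here is a sketch with
 * assumed numbers (64-byte blocks in a fresh 4096-byte page; nothing in
 * this file depends on these values).  The first four bytes of each free
 * block store the offset of the next free block, so the chain lives
 * entirely inside the page:
 *
 *	offset    0: next =   64
 *	offset   64: next =  128
 *	...
 *	offset 3968: next = 4096  (== allocation: end of chain)
 *
 * 'struct dma_page'->offset holds the head of this chain; allocation pops
 * the head and freeing pushes a block back (see pool_initialise_page(),
 * dma_pool_alloc() and dma_pool_free() below).
 */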

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;	/* blocks currently allocated from this page */
	unsigned int offset;	/* offset of first free block (free-list head) */
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int size;
	struct dma_page *page;
	struct dma_pool *pool;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		size_t blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
				      pool->name, blocks,
				      (size_t) pages *
				      (pool->allocation / pool->size),
				      pool->size, pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);
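
/*
 * Illustrative output of the "pools" attribute above (values are made up,
 * shown only to document the column layout: pool name, blocks in use,
 * total blocks, block size, pages):
 *
 *	poolinfo - 0.1
 *	buffer-2048         1    2 2048  1
 *	buffer-512        100  128  512 16
 */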

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, void *retval,
			     unsigned int offset, gfp_t mem_flags)
{
	int i;
	u8 *data = retval;
	/* the first 4 bytes hold the free-list link, so skip them */
	for (i = sizeof(offset); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n",
			__func__, pool->name, retval);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				data, pool->size, 1);
		break;
	}
	if (!want_init_on_alloc(mem_flags))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
}

static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
			  void *vaddr, dma_addr_t dma)
{
	unsigned int offset = vaddr - page->vaddr;
	unsigned int chain = page->offset;

	if ((dma - page->dma) != offset) {
		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (chain < pool->allocation) {
		if (chain != offset) {
			chain = *(int *)(page->vaddr + chain);
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, void *retval,
			     unsigned int offset, gfp_t mem_flags)
{
}

static bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
			  void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for DMA.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate
 * memory.  Such memory will all have "consistent" DMA mappings,
 * accessible by the device and its driver without using cache flushing
 * primitives.  The actual size of blocks allocated may be larger than
 * requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4 KBytes.  A usage sketch follows the function body below.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
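
/*
 * A minimal lifetime sketch (illustrative only; the device pointer, pool
 * name and sizes below are hypothetical, not taken from this file):
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydev-desc", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	... allocate and free blocks; see dma_pool_alloc()/dma_pool_free() ...
 *
 *	dma_pool_destroy(pool);		(all blocks must have been freed first)
 */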

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	pool_init_page(pool, page);
	page->in_use = 0;
	page->offset = 0;
	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
292a35a3455SMatthew Wilcox 
293e87aa773SMatthew Wilcox static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
294141e9d4bSMatthew Wilcox {
295141e9d4bSMatthew Wilcox 	struct dma_page *page;
296141e9d4bSMatthew Wilcox 
297a35a3455SMatthew Wilcox 	page = kmalloc(sizeof(*page), mem_flags);
298141e9d4bSMatthew Wilcox 	if (!page)
299141e9d4bSMatthew Wilcox 		return NULL;
3005407df10SKeith Busch 
301a35a3455SMatthew Wilcox 	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
302e87aa773SMatthew Wilcox 					 &page->dma, mem_flags);
3035407df10SKeith Busch 	if (!page->vaddr) {
3045407df10SKeith Busch 		kfree(page);
3055407df10SKeith Busch 		return NULL;
3065407df10SKeith Busch 	}
3075407df10SKeith Busch 
308a35a3455SMatthew Wilcox 	pool_initialise_page(pool, page);
309141e9d4bSMatthew Wilcox 	return page;
310141e9d4bSMatthew Wilcox }
311141e9d4bSMatthew Wilcox 
312d9e7e37bSNicholas Krause static inline bool is_page_busy(struct dma_page *page)
313141e9d4bSMatthew Wilcox {
314a35a3455SMatthew Wilcox 	return page->in_use != 0;
315141e9d4bSMatthew Wilcox }
316141e9d4bSMatthew Wilcox 
317141e9d4bSMatthew Wilcox /**
318141e9d4bSMatthew Wilcox  * dma_pool_destroy - destroys a pool of dma memory blocks.
319141e9d4bSMatthew Wilcox  * @pool: dma pool that will be destroyed
320141e9d4bSMatthew Wilcox  * Context: !in_interrupt()
321141e9d4bSMatthew Wilcox  *
322141e9d4bSMatthew Wilcox  * Caller guarantees that no more memory from the pool is in use,
323141e9d4bSMatthew Wilcox  * and that nothing will try to use the pool after this call.
324141e9d4bSMatthew Wilcox  */
325e87aa773SMatthew Wilcox void dma_pool_destroy(struct dma_pool *pool)
326141e9d4bSMatthew Wilcox {
32742286f83SAndy Shevchenko 	struct dma_page *page, *tmp;
32801c2965fSSebastian Andrzej Siewior 	bool empty = false;
32901c2965fSSebastian Andrzej Siewior 
33044d7175dSSergey Senozhatsky 	if (unlikely(!pool))
33144d7175dSSergey Senozhatsky 		return;
33244d7175dSSergey Senozhatsky 
33301c2965fSSebastian Andrzej Siewior 	mutex_lock(&pools_reg_lock);
334141e9d4bSMatthew Wilcox 	mutex_lock(&pools_lock);
335141e9d4bSMatthew Wilcox 	list_del(&pool->pools);
33667a540c6STony Battersby 	if (list_empty(&pool->dev->dma_pools))
33701c2965fSSebastian Andrzej Siewior 		empty = true;
338141e9d4bSMatthew Wilcox 	mutex_unlock(&pools_lock);
33901c2965fSSebastian Andrzej Siewior 	if (empty)
34001c2965fSSebastian Andrzej Siewior 		device_remove_file(pool->dev, &dev_attr_pools);
34101c2965fSSebastian Andrzej Siewior 	mutex_unlock(&pools_reg_lock);
342141e9d4bSMatthew Wilcox 
34342286f83SAndy Shevchenko 	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
344cc669954SKeith Busch 		if (!is_page_busy(page))
345cc669954SKeith Busch 			dma_free_coherent(pool->dev, pool->allocation,
346cc669954SKeith Busch 					  page->vaddr, page->dma);
347cc669954SKeith Busch 		else
34841a04814SAndy Shevchenko 			dev_err(pool->dev, "%s %s, %p busy\n", __func__,
349141e9d4bSMatthew Wilcox 				pool->name, page->vaddr);
350141e9d4bSMatthew Wilcox 		list_del(&page->page_list);
351141e9d4bSMatthew Wilcox 		kfree(page);
352141e9d4bSMatthew Wilcox 	}
353141e9d4bSMatthew Wilcox 
354141e9d4bSMatthew Wilcox 	kfree(pool);
355141e9d4bSMatthew Wilcox }
356e87aa773SMatthew Wilcox EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This may be called from interrupt context with GFP_ATOMIC; a usage
 * sketch follows the function body below.
 *
 * Return: the kernel virtual address of a currently unused block;
 * the block's DMA address is reported through @handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	unsigned int offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
	pool_check_block(pool, retval, offset, mem_flags);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
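
/*
 * Allocation/free sketch (hypothetical driver code, not from this file).
 * Because the pool lock is IRQ-safe, dma_pool_alloc() may be called in
 * interrupt context as long as mem_flags is GFP_ATOMIC:
 *
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 *
 *	(hand desc/dma to the hardware here: hypothetical step)
 *
 *	dma_pool_free(pool, desc, dma);
 */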

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return;
	}

	if (pool_page_err(pool, page, vaddr, dma)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return;
	}

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = vaddr - page->vaddr;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.  A probe-time sketch follows
 * the function body below.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
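
/*
 * Probe-time sketch for the managed variant (hypothetical driver, not
 * from this file).  The devres action registered above destroys the pool
 * on driver detach, so error and remove paths need no explicit cleanup:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("my-buffers", &pdev->dev, 512, 64, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *		return 0;
 *	}
 */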

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);