// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks across all pages.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_block {
	struct dma_block *next_block;
	dma_addr_t dma;
};

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct dma_block *next_block;
	size_t nr_blocks;
	size_t nr_active;
	size_t nr_pages;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_pool *pool;
	unsigned size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);

#ifdef DMAPOOL_DEBUG
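/*
 * Verify that a block about to be handed out still carries the
 * POOL_POISON_FREED pattern it was given when freed, then re-poison it as
 * allocated.  A mismatch means somebody wrote to the block after freeing it.
 */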
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
			       data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}

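/*
 * Walk the pool's page list and return the page whose DMA range covers
 * @dma, or NULL if the address was never handed out by this pool.
 */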
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

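/*
 * Sanity-check a block being freed: the DMA address must belong to one of
 * the pool's pages, and the block must not already be on the free list
 * (a double free).  Returns true if the free should be rejected.
 */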
static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

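/* Poison a freshly allocated page so stale reads are easy to spot. */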
static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

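/*
 * Pop the first block off the pool-wide free list and count it as active.
 * Caller must hold pool->lock.  Returns NULL when the free list is empty.
 */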
static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}

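/*
 * Push a freed block onto the head of the pool-wide free list, recording
 * its DMA address in the block header.  Caller must hold pool->lock.
 */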
static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	empty = list_empty(&dev->dma_pools);
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
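
/*
 * For illustration only: a minimal sketch of typical driver-side usage,
 * assuming a hypothetical device with 64-byte, 16-byte-aligned descriptors
 * (the names below are examples, not part of this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	pool = dma_pool_create("mydev-desc", &pdev->dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (desc) {
 *		// hand 'dma' to the device, use 'desc' from the CPU
 *		dma_pool_free(pool, desc, dma);
 *	}
 *	dma_pool_destroy(pool);
 */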
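/*
 * Carve a new page into 'size'-byte blocks, skipping over any 'boundary'
 * crossings, chain the blocks together, and splice the chain onto the head
 * of the pool-wide free list.  Caller must hold pool->lock.
 */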
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}

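/*
 * Allocate one new 'allocation'-sized page of coherent DMA memory plus its
 * cacheable bookkeeping header.  May sleep, depending on @mem_flags.
 */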
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	empty = list_empty(&pool->dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
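/* devres release callback: destroys the pool when its device detaches. */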
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);