xref: /openbmc/linux/mm/dmapool.c (revision a1e58bbd)
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list within the page.  Used blocks aren't tracked, but we keep a count of
 * how many are currently allocated from each page.
 */
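
/*
 * Illustrative layout (a sketch, not part of the code): for a hypothetical
 * pool with size = 64 and allocation = boundary = 4096, a freshly allocated
 * page encodes its free list in the free blocks themselves.  page->offset
 * holds the offset of the first free block, and the first bytes of each
 * free block hold the offset of the next one:
 *
 *	page->offset == 0
 *	*(int *)(vaddr +  0) ==  64
 *	*(int *)(vaddr + 64) == 128
 *	...until a stored offset reaches 'allocation', which ends the chain.
 *
 * Allocation pops the block at page->offset; freeing pushes a block back by
 * writing the old page->offset into it.  When a following block would reach
 * the next 'boundary' multiple, the chain skips ahead to that boundary.
 */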

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
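
/*
 * Worked example (illustrative, not part of the allocator): a hypothetical
 * call dma_pool_create("desc", dev, 48, 32, 0) is normalised by the code
 * above as follows:
 *
 *	size       = ALIGN(48, 32)         = 64		(48 % 32 != 0)
 *	allocation = max(size, PAGE_SIZE)  = 4096	(assuming 4 KiB pages)
 *	boundary   = allocation            = 4096	(caller passed 0)
 *
 * so each underlying dma_alloc_coherent() page is carved into 64-byte
 * blocks that never cross a 4 KiB boundary.
 */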

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
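
/*
 * Illustrative teardown order (hypothetical driver code): every outstanding
 * block must have been returned with dma_pool_free() before the pool is
 * destroyed, otherwise the "busy" branch above deliberately leaks the
 * coherent page:
 *
 *	dma_pool_free(pool, desc_vaddr, desc_dma);
 *	...
 *	dma_pool_destroy(pool);
 *
 * "desc_vaddr" and "desc_dma" are made-up names standing for the pair of
 * addresses saved at allocation time.
 */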

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
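
/*
 * Usage sketch (hypothetical driver code, not part of this file): keep both
 * addresses the allocator hands back, the CPU pointer for the driver and
 * the dma_addr_t for the device.  Use GFP_ATOMIC instead of GFP_KERNEL when
 * calling from interrupt context.  "struct my_desc" and "next_dma" are
 * made-up names for the sketch:
 *
 *	dma_addr_t dma;
 *	struct my_desc *desc;
 *
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->next_dma = cpu_to_le32(dma);
 */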

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	CONFIG_DEBUG_SLAB
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
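
/*
 * Worked example (illustrative): if a page has page->vaddr == V and
 * page->dma == D, a block handed out as the pair (V + 192, D + 192) must
 * come back with exactly that pair.  The code above recomputes
 * offset = vaddr - V (192 here) and, under CONFIG_DEBUG_SLAB, cross-checks
 * it against dma - D and walks the free chain to catch double frees.
 */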

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
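
/*
 * Usage sketch (hypothetical probe function, not part of this file): a pool
 * created with dmam_pool_create() needs no matching destroy call, since
 * devres tears it down when the driver detaches.  "my_probe" and "my_desc"
 * are made-up names:
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("my_desc", dev, 64, 16, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *		return 0;
 *	}
 */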
486 
487 /**
488  * dmam_pool_destroy - Managed dma_pool_destroy()
489  * @pool: dma pool that will be destroyed
490  *
491  * Managed dma_pool_destroy().
492  */
493 void dmam_pool_destroy(struct dma_pool *pool)
494 {
495 	struct device *dev = pool->dev;
496 
497 	dma_pool_destroy(pool);
498 	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
499 }
500 EXPORT_SYMBOL(dmam_pool_destroy);
501