xref: /openbmc/linux/mm/dmapool.c (revision 06c6fad9)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * DMA Pool allocator
4   *
5   * Copyright 2001 David Brownell
6   * Copyright 2007 Intel Corporation
7   *   Author: Matthew Wilcox <willy@linux.intel.com>
8   *
9   * This allocator returns small blocks of a given size which are DMA-able by
10   * the given device.  It uses the dma_alloc_coherent page allocator to get
11   * new pages, then splits them up into blocks of the required size.
12   * Many older drivers still have their own code to do this.
13   *
14   * The current design of this allocator is fairly simple.  The pool is
15   * represented by the 'struct dma_pool' which keeps a doubly-linked list of
16   * allocated pages.  Each page in the page_list is split into blocks of at
17   * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
18   * list of free blocks within the page.  Used blocks aren't tracked, but we
19   * keep a count of how many are currently allocated from each page.
20   */
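
/*
 * For illustration (assumed values, not from any particular driver):
 * with 64-byte blocks in a freshly initialised 4096-byte page, each
 * free block's first four bytes hold the offset of the next free block
 * in that page, so offset 0 stores 64, offset 64 stores 128, and so on,
 * while page->offset points at the first free block.  A stored offset
 * greater than or equal to the allocation size terminates the chain.
 */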
21  
22  #include <linux/device.h>
23  #include <linux/dma-mapping.h>
24  #include <linux/dmapool.h>
25  #include <linux/kernel.h>
26  #include <linux/list.h>
27  #include <linux/export.h>
28  #include <linux/mutex.h>
29  #include <linux/poison.h>
30  #include <linux/sched.h>
31  #include <linux/sched/mm.h>
32  #include <linux/slab.h>
33  #include <linux/stat.h>
34  #include <linux/spinlock.h>
35  #include <linux/string.h>
36  #include <linux/types.h>
37  #include <linux/wait.h>
38  
39  #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
40  #define DMAPOOL_DEBUG 1
41  #endif
42  
43  struct dma_pool {		/* the pool */
44  	struct list_head page_list;
45  	spinlock_t lock;
46  	size_t size;
47  	struct device *dev;
48  	size_t allocation;
49  	size_t boundary;
50  	char name[32];
51  	struct list_head pools;
52  };
53  
54  struct dma_page {		/* cacheable header for 'allocation' bytes */
55  	struct list_head page_list;
56  	void *vaddr;
57  	dma_addr_t dma;
58  	unsigned int in_use;
59  	unsigned int offset;
60  };
61  
62  static DEFINE_MUTEX(pools_lock);
63  static DEFINE_MUTEX(pools_reg_lock);
64  
65  static ssize_t
66  show_pools(struct device *dev, struct device_attribute *attr, char *buf)
67  {
68  	unsigned temp;
69  	unsigned size;
70  	char *next;
71  	struct dma_page *page;
72  	struct dma_pool *pool;
73  
74  	next = buf;
75  	size = PAGE_SIZE;
76  
77  	temp = scnprintf(next, size, "poolinfo - 0.1\n");
78  	size -= temp;
79  	next += temp;
80  
81  	mutex_lock(&pools_lock);
82  	list_for_each_entry(pool, &dev->dma_pools, pools) {
83  		unsigned pages = 0;
84  		unsigned blocks = 0;
85  
86  		spin_lock_irq(&pool->lock);
87  		list_for_each_entry(page, &pool->page_list, page_list) {
88  			pages++;
89  			blocks += page->in_use;
90  		}
91  		spin_unlock_irq(&pool->lock);
92  
93  		/* per-pool info, no real statistics yet */
94  		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
95  				 pool->name, blocks,
96  				 pages * (pool->allocation / pool->size),
97  				 pool->size, pages);
98  		size -= temp;
99  		next += temp;
100  	}
101  	mutex_unlock(&pools_lock);
102  
103  	return PAGE_SIZE - size;
104  }
105  
106  static DEVICE_ATTR(pools, 0444, show_pools, NULL);
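
/*
 * This attribute appears as a read-only "pools" file in the device's
 * sysfs directory.  A sketch of its contents, with assumed pool names
 * and counts (columns: pool name, blocks in use, total blocks, block
 * size, pages allocated):
 *
 *	poolinfo - 0.1
 *	buffer-512          3   16  512  2
 *	buffer-2048         1    2 2048  1
 */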
107  
108  /**
109   * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
110   * @name: name of pool, for diagnostics
111   * @dev: device that will be doing the DMA
112   * @size: size of the blocks in this pool.
113   * @align: alignment requirement for blocks; must be a power of two
114   * @boundary: returned blocks won't cross this power of two boundary
115   * Context: not in_interrupt()
116   *
117   * Given one of these pools, dma_pool_alloc()
118   * may be used to allocate memory.  Such memory will all have "consistent"
119   * DMA mappings, accessible by the device and its driver without using
120   * cache flushing primitives.  The actual size of blocks allocated may be
121   * larger than requested because of alignment.
122   *
123   * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
124   * cross that size boundary.  This is useful for devices which have
125   * addressing restrictions on individual DMA transfers, such as not crossing
126   * boundaries of 4KBytes.
127   *
128   * Return: a dma allocation pool with the requested characteristics, or
129   * %NULL if one can't be created.
130   */
131  struct dma_pool *dma_pool_create(const char *name, struct device *dev,
132  				 size_t size, size_t align, size_t boundary)
133  {
134  	struct dma_pool *retval;
135  	size_t allocation;
136  	bool empty = false;
137  
138  	if (align == 0)
139  		align = 1;
140  	else if (align & (align - 1))
141  		return NULL;
142  
143  	if (size == 0)
144  		return NULL;
145  	else if (size < 4)
146  		size = 4;
147  
148  	size = ALIGN(size, align);
149  	allocation = max_t(size_t, size, PAGE_SIZE);
150  
151  	if (!boundary)
152  		boundary = allocation;
153  	else if ((boundary < size) || (boundary & (boundary - 1)))
154  		return NULL;
155  
156  	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
157  	if (!retval)
158  		return retval;
159  
160  	strscpy(retval->name, name, sizeof(retval->name));
161  
162  	retval->dev = dev;
163  
164  	INIT_LIST_HEAD(&retval->page_list);
165  	spin_lock_init(&retval->lock);
166  	retval->size = size;
167  	retval->boundary = boundary;
168  	retval->allocation = allocation;
169  
170  	INIT_LIST_HEAD(&retval->pools);
171  
172  	/*
173  	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
174  	 * pools_reg_lock ensures that there is no race between dma_pool_create()
175  	 * and dma_pool_destroy(), or within dma_pool_create() itself, where one
176  	 * invocation fails in device_create_file() while a second invocation
177  	 * assumes the sysfs file has already been created (admittedly a short
178  	 * window).
179  	 */
180  	mutex_lock(&pools_reg_lock);
181  	mutex_lock(&pools_lock);
182  	if (list_empty(&dev->dma_pools))
183  		empty = true;
184  	list_add(&retval->pools, &dev->dma_pools);
185  	mutex_unlock(&pools_lock);
186  	if (empty) {
187  		int err;
188  
189  		err = device_create_file(dev, &dev_attr_pools);
190  		if (err) {
191  			mutex_lock(&pools_lock);
192  			list_del(&retval->pools);
193  			mutex_unlock(&pools_lock);
194  			mutex_unlock(&pools_reg_lock);
195  			kfree(retval);
196  			return NULL;
197  		}
198  	}
199  	mutex_unlock(&pools_reg_lock);
200  	return retval;
201  }
202  EXPORT_SYMBOL(dma_pool_create);
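
/*
 * A minimal creation sketch (the device, the descriptor type and the
 * sizes are assumptions for illustration only): a driver whose
 * descriptors must not cross a 4 KiB boundary could set up its pool as
 *
 *	pool = dma_pool_create("mydev-desc", &pdev->dev,
 *			       sizeof(struct mydev_desc), 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 */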
203  
204  static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
205  {
206  	unsigned int offset = 0;
207  	unsigned int next_boundary = pool->boundary;
208  
209  	do {
210  		unsigned int next = offset + pool->size;
211  		if (unlikely((next + pool->size) >= next_boundary)) {
212  			next = next_boundary;
213  			next_boundary += pool->boundary;
214  		}
215  		*(int *)(page->vaddr + offset) = next;
216  		offset = next;
217  	} while (offset < pool->allocation);
218  }
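
/*
 * Worked example (assumed numbers, for illustration): with size = 96,
 * boundary = 256 and allocation = 4096, the chain starts 0 -> 96; a
 * block at offset 192 would cross the 256-byte boundary, so the block
 * at 96 stores 256 instead, and the chain continues 256 -> 352,
 * 352 -> 512, and so on.  No linked block straddles a multiple of 256,
 * and the last entry stores a value >= allocation, which terminates
 * the chain.
 */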
219  
220  static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
221  {
222  	struct dma_page *page;
223  
224  	page = kmalloc(sizeof(*page), mem_flags);
225  	if (!page)
226  		return NULL;
227  	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
228  					 &page->dma, mem_flags);
229  	if (page->vaddr) {
230  #ifdef	DMAPOOL_DEBUG
231  		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
232  #endif
233  		pool_initialise_page(pool, page);
234  		page->in_use = 0;
235  		page->offset = 0;
236  	} else {
237  		kfree(page);
238  		page = NULL;
239  	}
240  	return page;
241  }
242  
243  static inline bool is_page_busy(struct dma_page *page)
244  {
245  	return page->in_use != 0;
246  }
247  
248  static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
249  {
250  	dma_addr_t dma = page->dma;
251  
252  #ifdef	DMAPOOL_DEBUG
253  	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
254  #endif
255  	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
256  	list_del(&page->page_list);
257  	kfree(page);
258  }
259  
260  /**
261   * dma_pool_destroy - destroys a pool of dma memory blocks.
262   * @pool: dma pool that will be destroyed
263   * Context: !in_interrupt()
264   *
265   * Caller guarantees that no more memory from the pool is in use,
266   * and that nothing will try to use the pool after this call.
267   */
268  void dma_pool_destroy(struct dma_pool *pool)
269  {
270  	struct dma_page *page, *tmp;
271  	bool empty = false;
272  
273  	if (unlikely(!pool))
274  		return;
275  
276  	mutex_lock(&pools_reg_lock);
277  	mutex_lock(&pools_lock);
278  	list_del(&pool->pools);
279  	if (pool->dev && list_empty(&pool->dev->dma_pools))
280  		empty = true;
281  	mutex_unlock(&pools_lock);
282  	if (empty)
283  		device_remove_file(pool->dev, &dev_attr_pools);
284  	mutex_unlock(&pools_reg_lock);
285  
286  	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
287  		if (is_page_busy(page)) {
288  			if (pool->dev)
289  				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
290  					pool->name, page->vaddr);
291  			else
292  				pr_err("%s %s, %p busy\n", __func__,
293  				       pool->name, page->vaddr);
294  			/* leak the still-in-use consistent memory */
295  			list_del(&page->page_list);
296  			kfree(page);
297  		} else
298  			pool_free_page(pool, page);
299  	}
300  
301  	kfree(pool);
302  }
303  EXPORT_SYMBOL(dma_pool_destroy);
304  
305  /**
306   * dma_pool_alloc - get a block of consistent memory
307   * @pool: dma pool that will produce the block
308   * @mem_flags: GFP_* bitmask
309   * @handle: pointer to dma address of block
310   *
311   * Return: the kernel virtual address of a currently unused block,
312   * and reports its dma address through the handle.
313   * If such a memory block can't be allocated, %NULL is returned.
314   */
315  void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
316  		     dma_addr_t *handle)
317  {
318  	unsigned long flags;
319  	struct dma_page *page;
320  	size_t offset;
321  	void *retval;
322  
323  	might_alloc(mem_flags);
324  
325  	spin_lock_irqsave(&pool->lock, flags);
326  	list_for_each_entry(page, &pool->page_list, page_list) {
327  		if (page->offset < pool->allocation)
328  			goto ready;
329  	}
330  
331  	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
332  	spin_unlock_irqrestore(&pool->lock, flags);
333  
334  	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
335  	if (!page)
336  		return NULL;
337  
338  	spin_lock_irqsave(&pool->lock, flags);
339  
340  	list_add(&page->page_list, &pool->page_list);
341   ready:
342  	page->in_use++;
343  	offset = page->offset;
344  	page->offset = *(int *)(page->vaddr + offset);
345  	retval = offset + page->vaddr;
346  	*handle = offset + page->dma;
347  #ifdef	DMAPOOL_DEBUG
348  	{
349  		int i;
350  		u8 *data = retval;
351  		/* page->offset is stored in the first 4 bytes */
352  		for (i = sizeof(page->offset); i < pool->size; i++) {
353  			if (data[i] == POOL_POISON_FREED)
354  				continue;
355  			if (pool->dev)
356  				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
357  					__func__, pool->name, retval);
358  			else
359  				pr_err("%s %s, %p (corrupted)\n",
360  					__func__, pool->name, retval);
361  
362  			/*
363  			 * Dump the first 4 bytes even if they are not
364  			 * POOL_POISON_FREED
365  			 */
366  			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
367  					data, pool->size, 1);
368  			break;
369  		}
370  	}
371  	if (!(mem_flags & __GFP_ZERO))
372  		memset(retval, POOL_POISON_ALLOCATED, pool->size);
373  #endif
374  	spin_unlock_irqrestore(&pool->lock, flags);
375  
376  	if (want_init_on_alloc(mem_flags))
377  		memset(retval, 0, pool->size);
378  
379  	return retval;
380  }
381  EXPORT_SYMBOL(dma_pool_alloc);
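
/*
 * Allocation sketch (the variable names are assumptions): callers that
 * need zeroed blocks can use the dma_pool_zalloc() wrapper from
 * <linux/dmapool.h>, which ORs __GFP_ZERO into mem_flags so the
 * want_init_on_alloc() path above clears the block:
 *
 *	desc = dma_pool_zalloc(pool, GFP_KERNEL, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 */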
382  
383  static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
384  {
385  	struct dma_page *page;
386  
387  	list_for_each_entry(page, &pool->page_list, page_list) {
388  		if (dma < page->dma)
389  			continue;
390  		if ((dma - page->dma) < pool->allocation)
391  			return page;
392  	}
393  	return NULL;
394  }
395  
396  /**
397   * dma_pool_free - put block back into dma pool
398   * @pool: the dma pool holding the block
399   * @vaddr: virtual address of block
400   * @dma: dma address of block
401   *
402   * Caller promises neither device nor driver will again touch this block
403   * unless it is first re-allocated.
404   */
405  void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
406  {
407  	struct dma_page *page;
408  	unsigned long flags;
409  	unsigned int offset;
410  
411  	spin_lock_irqsave(&pool->lock, flags);
412  	page = pool_find_page(pool, dma);
413  	if (!page) {
414  		spin_unlock_irqrestore(&pool->lock, flags);
415  		if (pool->dev)
416  			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
417  				__func__, pool->name, vaddr, &dma);
418  		else
419  			pr_err("%s %s, %p/%pad (bad dma)\n",
420  			       __func__, pool->name, vaddr, &dma);
421  		return;
422  	}
423  
424  	offset = vaddr - page->vaddr;
425  	if (want_init_on_free())
426  		memset(vaddr, 0, pool->size);
427  #ifdef	DMAPOOL_DEBUG
428  	if ((dma - page->dma) != offset) {
429  		spin_unlock_irqrestore(&pool->lock, flags);
430  		if (pool->dev)
431  			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
432  				__func__, pool->name, vaddr, &dma);
433  		else
434  			pr_err("%s %s, %p (bad vaddr)/%pad\n",
435  			       __func__, pool->name, vaddr, &dma);
436  		return;
437  	}
438  	{
439  		unsigned int chain = page->offset;
440  		while (chain < pool->allocation) {
441  			if (chain != offset) {
442  				chain = *(int *)(page->vaddr + chain);
443  				continue;
444  			}
445  			spin_unlock_irqrestore(&pool->lock, flags);
446  			if (pool->dev)
447  				dev_err(pool->dev, "%s %s, dma %pad already free\n",
448  					__func__, pool->name, &dma);
449  			else
450  				pr_err("%s %s, dma %pad already free\n",
451  				       __func__, pool->name, &dma);
452  			return;
453  		}
454  	}
455  	memset(vaddr, POOL_POISON_FREED, pool->size);
456  #endif
457  
458  	page->in_use--;
459  	*(int *)vaddr = page->offset;
460  	page->offset = offset;
461  	/*
462  	 * Resist the temptation to do
463  	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
464  	 * Better to have a few empty pages hang around.
465  	 */
466  	spin_unlock_irqrestore(&pool->lock, flags);
467  }
468  EXPORT_SYMBOL(dma_pool_free);
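
/*
 * Putting the API together: a minimal, illustrative lifecycle.  The
 * pool name and the sizes are assumptions, and a real driver would keep
 * the pool for the lifetime of the device rather than tearing it down
 * immediately as done here.
 */
static void __maybe_unused dma_pool_example_lifecycle(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* 64-byte blocks, 32-byte aligned, never crossing a 4 KiB boundary */
	pool = dma_pool_create("example", dev, 64, 32, 4096);
	if (!pool)
		return;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (vaddr) {
		/* program 'dma' into the device, access 'vaddr' from the CPU */
		dma_pool_free(pool, vaddr, dma);
	}

	/* all blocks must have been freed before the pool is destroyed */
	dma_pool_destroy(pool);
}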
469  
470  /*
471   * Managed DMA pool
472   */
473  static void dmam_pool_release(struct device *dev, void *res)
474  {
475  	struct dma_pool *pool = *(struct dma_pool **)res;
476  
477  	dma_pool_destroy(pool);
478  }
479  
480  static int dmam_pool_match(struct device *dev, void *res, void *match_data)
481  {
482  	return *(struct dma_pool **)res == match_data;
483  }
484  
485  /**
486   * dmam_pool_create - Managed dma_pool_create()
487   * @name: name of pool, for diagnostics
488   * @dev: device that will be doing the DMA
489   * @size: size of the blocks in this pool.
490   * @align: alignment requirement for blocks; must be a power of two
491   * @allocation: returned blocks won't cross this boundary (pass zero for no boundary restriction)
492   *
493   * Managed dma_pool_create().  DMA pool created with this function is
494   * automatically destroyed on driver detach.
495   *
496   * Return: a managed dma allocation pool with the requested
497   * characteristics, or %NULL if one can't be created.
498   */
499  struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
500  				  size_t size, size_t align, size_t allocation)
501  {
502  	struct dma_pool **ptr, *pool;
503  
504  	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
505  	if (!ptr)
506  		return NULL;
507  
508  	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
509  	if (pool)
510  		devres_add(dev, ptr);
511  	else
512  		devres_free(ptr);
513  
514  	return pool;
515  }
516  EXPORT_SYMBOL(dmam_pool_create);
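
/*
 * Managed usage sketch (the device, private struct and sizes are
 * assumptions): a driver that creates its pool in probe() with
 * dmam_pool_create() needs no matching destroy in its remove path,
 * since devres tears the pool down via dmam_pool_release() on driver
 * detach:
 *
 *	priv->desc_pool = dmam_pool_create("mydev-desc", &pdev->dev,
 *					   sizeof(struct mydev_desc), 16, 0);
 *	if (!priv->desc_pool)
 *		return -ENOMEM;
 */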
517  
518  /**
519   * dmam_pool_destroy - Managed dma_pool_destroy()
520   * @pool: dma pool that will be destroyed
521   *
522   * Managed dma_pool_destroy().
523   */
524  void dmam_pool_destroy(struct dma_pool *pool)
525  {
526  	struct device *dev = pool->dev;
527  
528  	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
529  }
530  EXPORT_SYMBOL(dmam_pool_destroy);
531