xref: /openbmc/linux/mm/cma.c (revision a6978d1b7bb8f3a25305e8ff7d367f7289614c5d)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "internal.h"
#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN within a block of the specified
 * align_order.  The value returned is expressed in units of
 * 1 << order_per_bit pages, i.e. in bitmap bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
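
/*
 * Worked example of the three helpers above (illustrative numbers only):
 * with order_per_bit == 0 every bitmap bit covers exactly one page, so for
 * a request with align_order == 9 (2 MiB worth of 4 KiB pages):
 *
 *	cma_bitmap_aligned_mask()   -> (1 << 9) - 1 = 511
 *	cma_bitmap_aligned_offset() -> base_pfn & 511, i.e. how far base_pfn
 *				       sits from a 512-page boundary
 *	cma_bitmap_pages_to_bits()  -> the page count itself
 *
 * With order_per_bit == 2 each bit covers four pages instead, so the same
 * request yields a mask of (1 << 7) - 1 = 127 and counts are rounded up to,
 * and expressed in, four-page units.
 */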

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA reserved range to be
	 * in the same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}
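
/*
 * Note: cma_reserve_pages_on_error() only sets a flag; it is consulted in
 * cma_activate_area()'s error path above.  A caller would use it when the
 * reserved range must stay out of the buddy allocator even if CMA activation
 * fails, for example (hypothetically) because the range is also exposed to
 * firmware or a coprocessor and must never be reused as ordinary kernel
 * memory.
 */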

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
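
/*
 * Illustrative use (hypothetical caller, not part of this file): code that
 * already owns a memblock-reserved range, such as a reserved-memory setup
 * hook, could turn it into a CMA area like this:
 *
 *	struct cma *cma;
 *	int ret;
 *
 *	ret = cma_init_reserved_mem(rmem_base, rmem_size, 0, "example", &cma);
 *	if (ret)
 *		return ret;
 *
 * The range must already be reserved in memblock and be aligned to
 * CMA_MIN_ALIGNMENT_BYTES, as the checks above enforce; rmem_base and
 * rmem_size are placeholder names.
 */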

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_NUMA))
		nid = NUMA_NO_NODE;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that compaction moves pages out of the cma area
		 * and not into it.
		 * Avoid using the first 4GB so as not to interfere with
		 * constrained zones such as DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (!addr && base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped or accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
		&base, nid);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
	       nid);
	return ret;
}
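
/*
 * Illustrative use (hypothetical values): early arch or platform setup code
 * reserving a 64 MiB CMA area anywhere in memory, on any node, with the
 * default granularity of one page per bitmap bit:
 *
 *	struct cma *cma;
 *	int ret;
 *
 *	ret = cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
 *					 "example", &cma, NUMA_NO_NODE);
 *
 * As described in the comment above, this has to run while the early
 * allocator (memblock) is still in charge of memory.
 */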

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
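
/*
 * cma_debug_show_areas() prints one line listing each free run as
 * <pages>@<bit offset>, followed by a summary; for example (made-up
 * numbers):
 *
 *	cma: number of available pages: 128@0+64@512=> 192 free of 1024 total pages
 */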

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates a contiguous run of pages from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
		(void *)cma, cma->name, count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
			 __func__, pfn, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a slightly different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
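
/*
 * Illustrative use (hypothetical driver code): @align is a page order, so
 * asking for 16 pages aligned to a 16-page boundary passes align == 4.
 * Every successful cma_alloc() is expected to be balanced by a cma_release()
 * with the same page count:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(cma, 16, 4, false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(cma, page, 16);
 */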

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
						(void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
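
/*
 * Illustrative use (hypothetical helper): summing the size of every
 * registered CMA area with cma_for_each_area():
 *
 *	static int example_add_size(struct cma *cma, void *data)
 *	{
 *		*(unsigned long *)data += cma_get_size(cma);
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *
 *	cma_for_each_area(example_add_size, &total);
 *
 * A non-zero return value from the callback stops the iteration and is
 * propagated back to the caller.
 */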