// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
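
/*
 * Worked example for the three helpers above (illustrative numbers only):
 * with order_per_bit == 2 one bitmap bit covers four pages, so for a
 * request with align_order == 4:
 *
 *	cma_bitmap_aligned_mask(cma, 4)   == (1UL << (4 - 2)) - 1 == 0x3
 *	cma_bitmap_pages_to_bits(cma, 17) == ALIGN(17, 4) >> 2    == 5
 *
 * and, assuming base_pfn == 0x12345, cma_bitmap_aligned_offset(cma, 4)
 * == (0x12345 & 0xf) >> 2 == 1, i.e. the offset of the base PFN within
 * an align_order-sized window, expressed in order_per_bit units.
 */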

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy; they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}
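
/*
 * Illustrative usage (hypothetical caller, not part of this file): code that
 * must keep its range reserved even if activation fails can mark the area
 * right after creating it:
 *
 *	cma_reserve_pages_on_error(my_cma);
 *
 * A failed cma_activate_area() then leaves the pages reserved instead of
 * releasing them to the buddy allocator.
 */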

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
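
/*
 * Usage sketch (illustrative only; "my_cma" and the handler are hypothetical):
 * a reserved-memory setup hook, e.g. one registered via
 * RESERVEDMEM_OF_DECLARE(), might wire an already memblock-reserved range
 * into CMA like this:
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_rmem_setup(struct reserved_mem *rmem)
 *	{
 *		return cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *					     rmem->name, &my_cma);
 *	}
 *
 * The range must already be reserved in memblock, and both base and size
 * must be aligned to CMA_MIN_ALIGNMENT_BYTES, otherwise -EINVAL is returned.
 */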

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock) has been
 * activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using the first 4GB so as not to interfere with
		 * constrained zones like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped or accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
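
/*
 * Usage sketch (illustrative only; "my_cma" is a hypothetical placeholder):
 * early arch setup code could reserve a 64 MiB area anywhere below 4 GiB:
 *
 *	static struct cma *my_cma;
 *
 *	void __init my_arch_reserve_cma(void)
 *	{
 *		if (cma_declare_contiguous_nid(0, SZ_64M, SZ_4G, 0, 0, false,
 *					       "my_cma", &my_cma, NUMA_NO_NODE))
 *			pr_warn("my_cma reservation failed\n");
 *	}
 *
 * Passing 0 for @base and false for @fixed lets memblock choose the
 * placement; the cma_declare_contiguous() wrapper in <linux/cma.h> does the
 * same with @nid fixed to NUMA_NO_NODE.
 */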

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
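
/*
 * With CONFIG_CMA_DEBUG enabled, the function above emits a single line built
 * up with pr_cont(), listing each free run as <pages>@<bitmap offset>, e.g.
 * (made-up numbers):
 *
 *	cma: number of available pages: 24@4+512@128=> 536 free of 1024 total pages
 */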

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));

		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
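
/*
 * Usage sketch (illustrative only; "my_cma" is a hypothetical placeholder):
 * allocating a physically contiguous, naturally aligned 1 MiB buffer and
 * releasing it again:
 *
 *	unsigned long nr = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, nr, get_order(SZ_1M), false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(my_cma, page, nr);
 *
 * @align is an order in PAGE_SIZE units, so get_order(SZ_1M) requests 1 MiB
 * alignment of the returned range.
 */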

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
						(void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
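
/*
 * Usage sketch (illustrative only; the callback name is hypothetical):
 * summing up the pages of every registered area. Returning non-zero from
 * the callback stops the iteration and is passed back to the caller:
 *
 *	static int total_cma_pages_cb(struct cma *cma, void *data)
 *	{
 *		*(unsigned long *)data += cma_get_size(cma) >> PAGE_SHIFT;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	cma_for_each_area(total_cma_pages_cb, &total);
 */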