// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN relative to the requested alignment
 * (align_order). The returned value is expressed in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
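
/*
 * Worked example of the granularity math above, using hypothetical values
 * (not taken from any particular platform): with order_per_bit = 2, one
 * bitmap bit covers 2^2 = 4 pages. For a request with align_order = 4:
 *
 *	cma_bitmap_aligned_mask()   -> (1 << (4 - 2)) - 1 = 3, i.e. an
 *	    allocation must start on a bit index that is a multiple of
 *	    4 bits (16 pages).
 *	cma_bitmap_aligned_offset() -> with base_pfn = 0x12345,
 *	    (0x12345 & 0xf) >> 2 = 1, the misalignment (in bits) between
 *	    the region base and the requested alignment.
 *	cma_bitmap_pages_to_bits()  -> for 10 pages, ALIGN(10, 4) >> 2 = 3
 *	    bits are needed.
 */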

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
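
/*
 * Illustrative sketch (not part of the original file): how early platform
 * code might register an existing reservation with cma_init_reserved_mem().
 * The base address, size, area name and function names are hypothetical and
 * chosen only for the example.
 */
static struct cma *example_cma;

static int __init __maybe_unused example_init_reserved_cma(void)
{
	phys_addr_t base = SZ_512M;	/* hypothetical placement */
	phys_addr_t size = SZ_64M;	/* keeps pageblock/MAX_ORDER alignment */
	int ret;

	/* The region must already be reserved; here it is done via memblock. */
	if (memblock_reserve(base, size))
		return -ENOMEM;

	/* order_per_bit = 0: one bitmap bit tracks one page. */
	ret = cma_init_reserved_mem(base, size, 0, "example", &example_cma);
	if (ret)
		pr_err("example: cma_init_reserved_mem() failed: %d\n", ret);

	return ret;
}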

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base.  If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm. In that case, a contiguous allocation could fail,
	 * which is not what we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped or accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
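
/*
 * Illustrative sketch (not part of the original file): a typical early-boot
 * caller lets the allocator choose the placement by passing base = 0 and
 * fixed = false. The size, area name and function name are hypothetical.
 */
static struct cma *example_dma_cma;

static void __init __maybe_unused example_declare_cma(void)
{
	int ret;

	/*
	 * base = 0 and limit = 0 mean "anywhere below the end of memory",
	 * alignment = 0 falls back to the pageblock/MAX_ORDER minimum and
	 * order_per_bit = 0 keeps one bitmap bit per page.
	 */
	ret = cma_declare_contiguous_nid(0, SZ_32M, 0, 0, 0, false,
					 "example-dma", &example_dma_cma,
					 NUMA_NO_NODE);
	if (ret)
		pr_warn("example: CMA reservation failed: %d\n", ret);
}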

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
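	/*
	 * Worked example with hypothetical values (illustrative only): for a
	 * region with order_per_bit = 0, a request of count = 64 pages at
	 * align = 4 (an order, i.e. 16 pages = 64 KiB with 4 KiB pages)
	 * yields mask = (1 << 4) - 1 = 15 and bitmap_count = 64, while
	 * offset compensates for a base_pfn that is not itself 16-page
	 * aligned, so the search below only returns ranges whose first
	 * page honours the requested alignment.
	 */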

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a slightly different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
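
/*
 * Illustrative sketch (not part of the original file): pairing cma_alloc()
 * with cma_release() for a caller that already holds a struct cma, e.g. one
 * registered by the declaration helpers above. The function name and the
 * 16-page request size are hypothetical.
 */
static bool __maybe_unused example_use_cma(struct cma *cma)
{
	/* 16 pages, no extra alignment (order 0), warn on failure. */
	struct page *page = cma_alloc(cma, 16, 0, false);

	if (!page)
		return false;

	/* ... use the 16 physically contiguous pages here ... */

	/* Release with the same count that was allocated. */
	return cma_release(cma, page, 16);
}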

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
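
/*
 * Illustrative sketch (not part of the original file): a cma_for_each_area()
 * callback that uses the accessors at the top of this file to report every
 * registered area. The function names are hypothetical.
 */
static int __maybe_unused example_report_one(struct cma *cma, void *data)
{
	phys_addr_t base = cma_get_base(cma);

	pr_info("area %s: base %pa, size %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));

	return 0;	/* keep iterating; a non-zero value stops the walk */
}

static void __maybe_unused example_report_areas(void)
{
	cma_for_each_area(example_report_one, NULL);
}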