xref: /openbmc/linux/mm/cma.c (revision efe4a1ac)
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the first PFN aligned to
 * align_order. The returned value is expressed in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

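/*
 * Worked example (illustrative only, not in the original source), assuming
 * order_per_bit == 0: for align_order == 4 (a 16-page alignment),
 * cma_bitmap_aligned_mask() returns (1 << 4) - 1 == 15; for an area whose
 * base_pfn is 0x12348, cma_bitmap_aligned_offset() returns
 * ALIGN(0x12348, 16) - 0x12348 == 8, i.e. the first aligned allocation may
 * start 8 bits into the bitmap; and cma_bitmap_pages_to_bits(cma, 100)
 * simply returns 100.
 */
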
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}
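
/*
 * Worked example (illustrative only, not in the original source): for a
 * 64 MiB area with 4 KiB pages, order_per_bit == 0 and 64-bit longs, the
 * area covers 16384 pages, so the bitmap above needs
 * BITS_TO_LONGS(16384) == 256 longs (2 KiB); assuming pageblock_order == 9,
 * the loop initialises 16384 >> 9 == 32 reserved pageblocks.
 */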

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If NULL, a default "cmaN" name is generated.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
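
/*
 * Illustrative sketch only (not part of the original file): how a
 * hypothetical early-boot caller might hand an already memblock-reserved
 * range to CMA. example_cma_setup(), base, size and the "example" name are
 * made up for the example; in-tree, the device-tree "shared-dma-pool"
 * reserved-memory handler uses this same path.
 *
 *	static int __init example_cma_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		struct cma *cma;
 *
 *		return cma_init_reserved_mem(base, size, 0, "example", &cma);
 *	}
 */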

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. If NULL, a default "cmaN" name is generated.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy algorithm.
	 * In that case you couldn't get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
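
	/*
	 * Worked example (illustrative only, not in the original source):
	 * with 4 KiB pages and the default MAX_ORDER of 11, the minimum
	 * alignment computed above is at least PAGE_SIZE << 10 == 4 MiB
	 * (more if pageblock_order is larger), so base and size are rounded
	 * up, and limit rounded down, to 4 MiB boundaries.
	 */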

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
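
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * arch/board-level call reserving a 64 MiB CMA area, placed anywhere by
 * memblock. The size and the "example" name are made up for the example.
 *
 *	static struct cma *example_cma;
 *
 *	void __init example_reserve_cma(void)
 *	{
 *		if (cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					   "example", &example_cma))
 *			pr_warn("example CMA reservation failed\n");
 *	}
 */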

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the allocation.
 *
 * This function allocates part of a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret) {
		pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
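
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * driver allocating 16 pages from a CMA area and releasing them again.
 * example_cma is assumed to come from an earlier cma_declare_contiguous()
 * or cma_init_reserved_mem() call.
 *
 *	struct page *pages;
 *
 *	pages = cma_alloc(example_cma, 16, 0, GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	cma_release(example_cma, pages, 16);
 */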

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
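
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * cma_for_each_area() callback that logs every registered area. Returning
 * a non-zero value from the callback stops the iteration early.
 *
 *	static int example_show_one(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	cma_for_each_area(example_show_one, NULL);
 */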