/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset from the area's base PFN to the first PFN that is
 * aligned to the requested order, expressed in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

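/*
 * Worked example (editor's illustration, not part of the original source):
 * take an area with order_per_bit = 0 whose base_pfn is 0x12400 (the
 * reservation paths later in this file align the base far more strictly in
 * practice), and a cma_alloc() request with align_order = 8, i.e. 256
 * pages or 1 MiB with 4 KiB pages.  The helpers above then compute
 *
 *	mask   = (1UL << (8 - 0)) - 1                    = 0xff
 *	offset = (ALIGN(0x12400, 1 << 8) - 0x12400) >> 0 = 0
 *
 * The mask restricts the bitmap search in cma_alloc() to indices that are
 * multiples of 256, which correspond to 1 MiB-aligned PFNs because the
 * base PFN is itself 256-page aligned; offset only becomes non-zero when
 * the base PFN is not aligned to the requested order.
 */
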
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

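/*
 * Illustrative sketch (editor's example, not part of the original source):
 * a hypothetical early-boot caller handing an already memblock-reserved
 * range over to CMA.  The function name, base address and size are
 * assumptions made purely for illustration; real callers live in arch or
 * reserved-memory setup code and run before the core_initcall above.
 */
static int __init example_cma_register(void)
{
	struct cma *cma;

	/*
	 * Assumes 16 MiB at 0x40000000 was reserved earlier with
	 * memblock_reserve() and satisfies the alignment checks above.
	 * A real caller would keep the returned pointer for later
	 * cma_alloc()/cma_release() calls.
	 */
	return cma_init_reserved_mem(0x40000000, SZ_16M, 0, &cma);
}
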
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pageblocks by the page allocator's buddy
	 * algorithm. In that case a contiguous allocation would fail, which
	 * is not what we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

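/*
 * Illustrative sketch (editor's example, not part of the original source):
 * a hypothetical arch setup hook reserving a 64 MiB CMA area anywhere in
 * memory while memblock is still active.  The function name and size are
 * assumptions made for illustration only.
 */
static int __init example_reserve_boot_cma(void)
{
	struct cma *cma;

	/* base = 0 and limit = 0 mean "anywhere"; alignment 0 picks the default */
	return cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false, &cma);
}
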
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the allocation.
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a slightly different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret) {
		pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

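/*
 * Illustrative sketch (editor's example, not part of the original source):
 * a hypothetical driver helper allocating a physically contiguous,
 * naturally aligned 1 MiB buffer.  The helper name is an assumption; "cma"
 * stands for whatever struct cma * the caller obtained from
 * cma_declare_contiguous() or cma_init_reserved_mem().
 */
static struct page *example_cma_alloc_1m(struct cma *cma)
{
	/* 1 MiB is 256 pages with 4 KiB pages; align to 2^8 pages = 1 MiB */
	return cma_alloc(cma, SZ_1M >> PAGE_SHIFT, get_order(SZ_1M),
			 GFP_KERNEL);
}
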
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

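/*
 * Illustrative sketch (editor's example, not part of the original source):
 * the counterpart to the allocation example above.  The count passed to
 * cma_release() must match the count originally passed to cma_alloc();
 * the helper name is again an assumption made for illustration.
 */
static void example_cma_free_1m(struct cma *cma, struct page *page)
{
	if (!cma_release(cma, page, SZ_1M >> PAGE_SHIFT))
		pr_warn("example: pages did not belong to the CMA area\n");
}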