xref: /openbmc/linux/mm/cma.c (revision 93d90ad7)
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>	/* kmemleak_ignore(), used below */

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex	lock;
};

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
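
/*
 * Worked example (illustrative): with order_per_bit == 0 and a
 * requested alignment of order 4 (16 pages), the mask is
 * (1UL << (4 - 0)) - 1 == 0xf, so cma_alloc() only considers bitmap
 * positions that are multiples of 16 (after the offset adjustment
 * computed below).
 */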

static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	/*
	 * Return the offset (in bitmap bits) of the region's base pfn
	 * within one 2^align_order alignment unit;
	 * bitmap_find_next_zero_area_off() adds it back so that results
	 * are aligned in absolute pfn space, not merely relative to the
	 * start of the CMA region.
	 */
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
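
/*
 * Worked example (illustrative): with base_pfn == 0x2f480,
 * order_per_bit == 0 and a requested order-9 alignment (2 MiB with
 * 4 KiB pages), the offset is 0x2f480 & 0x1ff == 0x80 (128 bits).
 * Bitmap index 384 then satisfies (384 + 128) % 512 == 0 and maps to
 * pfn 0x2f600, which is 512-page aligned as required.
 */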

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
						unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
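
/*
 * Worked example (illustrative): with order_per_bit == 2 each bitmap
 * bit covers 2^2 == 4 pages, so a request for 10 pages needs
 * ALIGN(10, 4) >> 2 == 3 bits, i.e. 12 pages' worth of bitmap.
 */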

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}
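
/*
 * Illustrative numbers: a 16 MiB area on a 4 KiB-page system with
 * pageblock_order == 10 has cma->count == 4096 pages, so
 * cma_activate_area() above initialises 4096 >> 10 == 4 reserved
 * pageblocks.
 */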

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on the bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 int order_per_bit, struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;

	return 0;
}
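
/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * reserved-memory handler that has already reserved a block via
 * memblock could hand it over to CMA like this:
 *
 *	struct cma *cma;
 *	int err = cma_init_reserved_mem(rmem_base, rmem_size, 0, &cma);
 *	if (err)
 *		return err;
 *
 * "rmem_base"/"rmem_size" are assumed to describe a suitably aligned,
 * memblock-reserved range.
 */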

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on the bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate.  But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to
	 * get the physical address from it.  On x86 there is a validation
	 * check for this case, so the following workaround is needed to
	 * avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could otherwise be merged into
	 * adjacent unmovable-migratetype pages by the page allocator's buddy
	 * algorithm, in which case a later contiguous allocation near those
	 * boundaries could fail; aligning everything to at least the
	 * pageblock size avoids that.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	totalcma_pages += (size / PAGE_SIZE);
	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
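
/*
 * Usage sketch (hypothetical, modelled on arch early-boot callers):
 * carve out 64 MiB anywhere below a platform DMA limit, before the
 * buddy allocator is up:
 *
 *	struct cma *cma;
 *	int err = cma_declare_contiguous(0, SZ_64M, dma_limit, 0, 0,
 *					 false, &cma);
 *
 * "dma_limit" is an assumed platform-specific physical address bound.
 */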

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, offset, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a slightly different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
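
/*
 * Retry example (illustrative): for an order-4 aligned request
 * (mask == 0xf) that fails migration at bitmap index 32, the next
 * search starts at 32 + 0xf + 1 == 48, the next aligned candidate.
 */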

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}
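
/*
 * Usage sketch (hypothetical driver code): allocate and free a 1 MiB
 * buffer (256 pages of 4 KiB) from a previously declared area:
 *
 *	struct page *page = cma_alloc(cma, 256, 0);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(cma, page, 256);
 *
 * "cma" is assumed to come from cma_declare_contiguous() at boot.
 */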