xref: /openbmc/linux/mm/cma.c (revision 6774def6)
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex	lock;
};
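
/*
 * Illustrative note (not part of the original file; the numbers are
 * hypothetical): each bit in @bitmap tracks 2^order_per_bit pages starting
 * at @base_pfn.  A 16 MiB area with 4 KiB pages and order_per_bit = 0 has
 * count = 4096 pages and therefore needs a 4096-bit (512-byte) bitmap;
 * with order_per_bit = 2 the same area needs only 1024 bits.
 */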

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
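
/*
 * Illustrative example (added for clarity; the values are hypothetical):
 * with order_per_bit = 0, a request for 2^4-page alignment (align_order = 4)
 * yields mask = (1 << 4) - 1 = 0xf, i.e. the first bit of an allocation must
 * sit on a 16-bit boundary in the bitmap.  If align_order <= order_per_bit,
 * every bit already satisfies the requested alignment, so the mask is 0.
 */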

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
						unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
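
/*
 * Illustrative example (values are hypothetical): with order_per_bit = 2
 * each bit covers 4 pages, so a request for 10 pages is first rounded up
 * to 12 pages by ALIGN() and then converted to 12 >> 2 = 3 bitmap bits.
 */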

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}
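
/*
 * Illustrative note (values are hypothetical): the activation loop above
 * walks the area one pageblock at a time.  With pageblock_order = 10 and a
 * reserved area of count = 4096 pages, i starts at 4096 >> 10 = 4, so
 * init_cma_reserved_pageblock() is called once for each of the four
 * 1024-page blocks after every PFN in the block has been checked against
 * the zone.
 */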

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 int order_per_bit, struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;

	return 0;
}
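
/*
 * Usage sketch (illustrative only; the identifiers example_base,
 * example_size and example_cma are assumptions made for this comment,
 * not part of the original file):
 *
 *	static struct cma *example_cma;
 *
 *	// The range example_base..example_base+example_size must already
 *	// be reserved in memblock, e.g. from a device-tree reserved region.
 *	int ret = cma_init_reserved_mem(example_base, example_size,
 *					0, &example_cma);
 *	if (ret)
 *		pr_err("cma init failed: %d\n", ret);
 */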

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve the contiguous area at exactly @base.  If false,
 * reserve it in the range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start = __pa(high_memory);
	int ret = 0;

	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm.  In that case we could not get contiguous memory,
	 * which is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
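
	/*
	 * Illustrative example (added for clarity; the numbers assume a
	 * configuration with 4 KiB pages, MAX_ORDER = 11 and
	 * pageblock_order = 10): the minimum alignment enforced above is
	 * PAGE_SIZE << 10 = 4 MiB, so a caller passing base = 5 MiB and
	 * size = 6 MiB ends up with base = 8 MiB and size = 8 MiB after
	 * the ALIGN() calls.
	 */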

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
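
/*
 * Usage sketch (illustrative only; the size and the identifier example_cma
 * are assumptions for this comment): an architecture's early setup code
 * might reserve a 16 MiB area anywhere in memory with
 *
 *	static struct cma *example_cma;
 *
 *	int ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0,
 *					 false, &example_cma);
 *
 * which picks a suitably aligned base via memblock and registers the area
 * for activation by cma_init_reserved_areas() at core_initcall time.
 */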

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a slightly different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
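
/*
 * Illustrative example (not part of the original file; example_cma is a
 * hypothetical, already initialised area): a caller needing a physically
 * contiguous 64 KiB buffer with 4 KiB pages could use
 *
 *	struct page *page = cma_alloc(example_cma, 16, 4);
 *
 * which requests 16 pages aligned to a 2^4-page boundary and returns the
 * first page of the range, or NULL on failure.
 */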

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}
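
/*
 * Illustrative example (not part of the original file; page and example_cma
 * are hypothetical): a buffer obtained with cma_alloc(example_cma, 16, 4)
 * is handed back with
 *
 *	cma_release(example_cma, page, 16);
 *
 * The count passed here should match the size of the original allocation,
 * since only the corresponding bitmap bits are cleared.
 */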