/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex	lock;
};

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

/*
 * Mask, in units of bitmap bits, that the start of an allocation must
 * satisfy so that it is aligned to 2^align_order pages.
 */
static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
						unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}
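
/*
 * Worked example (illustrative only, not part of the original source): the
 * bitmap tracks the area at a granularity of 2^order_per_bit pages per bit.
 * Assuming order_per_bit == 2, one bit covers four pages, so a request for
 * ten pages maps to ALIGN(10, 4) >> 2 == 3 bits in
 * cma_bitmap_pages_to_bits(), and an allocation aligned to 2^4 pages gets a
 * search mask of (1 << (4 - 2)) - 1 == 3 from cma_bitmap_aligned_mask(),
 * i.e. it may only start on a four-bit (sixteen-page) boundary.
 */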

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	struct cma *cma;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
		__func__, (unsigned long)size, (unsigned long)base,
		(unsigned long)limit, (unsigned long)alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable pageblocks by the page allocator's buddy algorithm. In
	 * that case a contiguous allocation could not be satisfied, which is
	 * not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;

	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
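
/*
 * Usage sketch (illustrative only, not part of this file): early arch setup
 * code might reserve a 16 MiB CMA area once memblock is up.  The names
 * example_cma and example_reserve_cma() below are hypothetical.
 *
 *	static struct cma *example_cma;
 *
 *	static void __init example_reserve_cma(void)
 *	{
 *		int err;
 *
 *		err = cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
 *					     &example_cma);
 *		if (err)
 *			pr_warn("example CMA reservation failed: %d\n", err);
 *	}
 */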

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of contiguous memory on a specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}
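
/*
 * Usage sketch (illustrative only, not part of this file): a driver holding
 * a struct cma pointer obtained via cma_declare_contiguous() could allocate
 * and free a block of pages as below.  The name example_cma is hypothetical,
 * and the size comments assume 4 KiB pages.
 *
 *	struct page *pages;
 *
 *	// 16 pages, aligned to 2^4 pages (64 KiB with 4 KiB pages)
 *	pages = cma_alloc(example_cma, 1 << 4, 4);
 *	if (!pages)
 *		return -ENOMEM;
 *
 *	// ... use page_address(pages) or DMA-map the range as needed ...
 *
 *	if (!cma_release(example_cma, pages, 1 << 4))
 *		pr_warn("pages did not belong to the CMA area\n");
 */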