/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}
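
/*
 * Worked example for the bitmap helpers above (added for illustration, not
 * part of the original source): with order_per_bit == 2, one bitmap bit
 * covers 2^2 = 4 pages.  A request for 10 pages therefore needs
 * cma_bitmap_pages_to_bits() = ALIGN(10, 4) >> 2 = 3 bits, and a request
 * with align_order == 4 (a 16-page alignment) gets
 * cma_bitmap_aligned_mask() = (1 << (4 - 2)) - 1 = 3, i.e. the chosen bit
 * index plus cma_bitmap_aligned_offset() must be a multiple of 4 bits.
 */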

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If NULL, a name is generated automatically.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
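
/*
 * Illustrative sketch (not part of the original file): how early-boot code
 * could hand an already memblock-reserved, suitably aligned range over to
 * CMA.  The names example_cma and example_cma_setup are hypothetical.
 *
 *	static struct cma *example_cma;
 *
 *	static int __init example_cma_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		// base/size must already be reserved via memblock_reserve()
 *		// and aligned as checked by cma_init_reserved_mem() above.
 *		return cma_init_reserved_mem(base, size, 0, "example",
 *					     &example_cma);
 *	}
 */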

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm; in that case a contiguous allocation would no longer
	 * be possible, which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
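
/*
 * Illustrative sketch (not part of the original file): how arch or platform
 * code might reserve a 64 MiB CMA area anywhere in memory during early boot.
 * The names example_reserve_cma and example_cma are hypothetical.
 *
 *	static struct cma *example_cma;
 *
 *	static void __init example_reserve_cma(void)
 *	{
 *		int err;
 *
 *		// base = 0 and limit = 0 let memblock pick any suitable range;
 *		// fixed = false, order_per_bit = 0, default alignment.
 *		err = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					     "example", &example_cma);
 *		if (err)
 *			pr_warn("example CMA reservation failed: %d\n", err);
 *	}
 */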

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask passed to alloc_contig_range() for page migration.
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret) {
		pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
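
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * grabbing a physically contiguous, 2 MiB-aligned buffer from its CMA area
 * and releasing it again.  example_dev_cma and nr_pages are assumed names.
 *
 *	size_t nr_pages = SZ_2M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	// align is given as a PAGE_SIZE order, so get_order(SZ_2M) requests
 *	// a 2 MiB-aligned placement within the area.
 *	page = cma_alloc(example_dev_cma, nr_pages, get_order(SZ_2M),
 *			 GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	// ... use page_to_phys(page) / page_address(page) ...
 *
 *	cma_release(example_dev_cma, page, nr_pages);
 */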

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
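
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * cma_for_each_area() callback that logs every registered area.  The name
 * example_print_cma is an assumption.
 *
 *	static int example_print_cma(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: %lu bytes at %pa\n", cma_get_name(cma),
 *			cma_get_size(cma), &base);
 *		return 0;	// a non-zero return stops the iteration
 *	}
 *
 *	cma_for_each_area(example_print_cma, NULL);
 */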