/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>

struct cma {
	unsigned long base_pfn;
	unsigned long count;
	unsigned long *bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex lock;
};

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
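
/*
 * Illustrative note (not in the original source): with order_per_bit == 2,
 * each bitmap bit covers 1 << 2 = 4 pages, so a request for 9 pages maps to
 * cma_bitmap_pages_to_bits() = ALIGN(9, 4) >> 2 = 3 bits, and an alignment
 * request of order 3 yields cma_bitmap_aligned_mask() =
 * (1UL << (3 - 2)) - 1 = 1.
 */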

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);
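
/*
 * Illustrative note (not in the original source): cma_activate_area() walks
 * the reserved area one pageblock at a time. Assuming 4 KiB pages and
 * pageblock_order == 10, a 32 MiB area has cma->count == 8192 pages, so
 * i == 8192 >> 10 == 8 pageblocks of 1024 pages each are handed to
 * init_cma_reserved_pageblock().
 */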

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 int order_per_bit, struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;

	return 0;
}
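
/*
 * Hedged usage sketch (illustrative, not part of this file): a caller that
 * has already set aside a suitably aligned region with memblock_reserve()
 * could hand it over to CMA like this. The names my_base, my_size, my_cma
 * and my_reserve are hypothetical, and the values assume the region passes
 * the alignment checks above.
 *
 *	static struct cma *my_cma;
 *
 *	void __init my_reserve(void)
 *	{
 *		phys_addr_t my_base = 0x20000000;
 *		phys_addr_t my_size = SZ_32M;
 *
 *		if (cma_init_reserved_mem(my_base, my_size, 0, &my_cma))
 *			pr_warn("cma: handover failed\n");
 *	}
 */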

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start = __pa(high_memory);
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
		__func__, (unsigned long)size, (unsigned long)base,
		(unsigned long)limit, (unsigned long)alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm. In that case you couldn't get contiguous memory, which
	 * is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
			(unsigned long)base, (unsigned long)highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
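
/*
 * Hedged usage sketch (illustrative, not part of this file): early arch code
 * could ask for a 16 MiB area anywhere in memory, letting CMA pick the base
 * and the default alignment. The names dma_cma and my_arch_reserve are
 * hypothetical.
 *
 *	static struct cma *dma_cma;
 *
 *	void __init my_arch_reserve(void)
 *	{
 *		if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0,
 *					   false, &dma_cma))
 *			pr_warn("cma: failed to reserve area\n");
 *	}
 */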

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
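
/*
 * Hedged usage sketch (illustrative, not part of this file): allocate 1 MiB,
 * i.e. 256 pages assuming 4 KiB PAGE_SIZE, aligned to its own order, from a
 * hypothetical region pointer my_cma.
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 256, get_order(SZ_1M));
 *	if (!page)
 *		pr_warn("cma: allocation failed\n");
 */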

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to the contiguous area
 * and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}
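
/*
 * Hedged usage sketch (illustrative, not part of this file): a successful
 * cma_alloc() is paired with a cma_release() on the same region with the
 * same page count; my_cma and the 256-page count match the sketch above.
 *
 *	if (!cma_release(my_cma, page, 256))
 *		pr_warn("cma: pages do not belong to the area\n");
 */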