/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
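
/*
 * Worked example of the bitmap granularity (the numbers are illustrative,
 * not taken from any particular configuration): with order_per_bit == 1 each
 * bitmap bit covers two pages.  A request with align_order == 3 then gets a
 * mask of (1UL << (3 - 1)) - 1 == 3 from cma_bitmap_aligned_mask() above, so
 * the bitmap search in cma_alloc() only accepts start positions that are a
 * multiple of four bits, i.e. eight pages.
 */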

/*
 * Find the offset, in units of order_per_bit, between the area's base PFN
 * and the first PFN in the area that is aligned to the requested order.
 */
static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}
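
/*
 * Activate a reserved area: allocate the allocation bitmap and release every
 * pageblock of the area to the page allocator as MIGRATE_CMA.  This runs from
 * cma_init_reserved_areas() below via core_initcall(), once the slab
 * allocator needed for the bitmap is available.
 */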
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone.  Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 int order_per_bit, struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
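
/*
 * Minimal usage sketch for cma_init_reserved_mem() (illustrative only; the
 * function and variable names are hypothetical): early-boot code that has
 * already set aside a suitably aligned block with memblock can turn it into
 * a CMA region like this:
 *
 *	static struct cma *example_cma;
 *
 *	static int __init example_cma_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		int ret;
 *
 *		ret = memblock_reserve(base, size);
 *		if (ret)
 *			return ret;
 *
 *		return cma_init_reserved_mem(base, size, 0, &example_cma);
 *	}
 *
 * The base and size passed in must already satisfy the alignment checks
 * performed above, i.e. be multiples of
 * PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order).
 */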

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator.  It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.  This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate.  But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it.  On x86 there is a validation check
	 * for this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		 __func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could otherwise be merged into
	 * adjacent unmovable-migratetype pages by the page allocator's buddy
	 * algorithm, in which case the area would no longer yield contiguous
	 * memory, which is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
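
	/*
	 * Worked example of the sanitisation above (the numbers assume a
	 * 4 KiB PAGE_SIZE and the default MAX_ORDER of 11, so the minimum
	 * alignment is PAGE_SIZE << 10 == 4 MiB): a request with
	 * base == 0x1230000 and size == 5 MiB becomes base == 0x1400000 and
	 * size == 8 MiB.
	 */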

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
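
/*
 * Minimal usage sketch for cma_declare_contiguous() (illustrative; the size
 * and the cma pointer are hypothetical): architecture setup code would
 * typically reserve a default area early during boot, after memblock is up
 * but before the buddy allocator takes over:
 *
 *	static struct cma *boot_cma;
 *
 *	void __init example_cma_reserve(void)
 *	{
 *		if (cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					   &boot_cma))
 *			pr_warn("example: CMA reservation failed\n");
 *	}
 *
 * Pages are then handed out at runtime with cma_alloc() below.
 */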

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of the given contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, offset, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here.  We've marked this region
		 * for our exclusive use.  If the migration fails we will take
		 * the lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
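
/*
 * Illustrative allocation/release pairing (the caller and the page count are
 * hypothetical): a driver holding a struct cma pointer obtained from one of
 * the reservation helpers above could do:
 *
 *	static int example_use_cma(struct cma *cma)
 *	{
 *		struct page *pages = cma_alloc(cma, 16, 0);
 *
 *		if (!pages)
 *			return -ENOMEM;
 *
 *		cma_release(cma, pages, 16);
 *		return 0;
 *	}
 *
 * The count passed to cma_release() must match the count passed to the
 * corresponding cma_alloc() call.
 */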

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}