/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the first PFN aligned to the specified order and return its offset
 * from the area's base PFN, represented in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
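/*
 * Worked example for the bitmap-granularity helpers above (hypothetical
 * numbers, not taken from this file): with order_per_bit = 1, each bitmap
 * bit covers 2 pages. For an allocation with align_order = 3 (8 pages):
 *
 *	cma_bitmap_aligned_mask():   (1UL << (3 - 1)) - 1 = 3, so search
 *				     positions snap to multiples of 4 bits
 *				     (4 bits * 2 pages/bit = 8 pages);
 *	cma_bitmap_aligned_offset(): for a hypothetical base_pfn of 0x12345,
 *				     ALIGN(0x12345, 8) - 0x12345 = 3 pages,
 *				     i.e. 3 >> 1 = 1 bit of offset;
 *	cma_bitmap_pages_to_bits():  5 pages -> ALIGN(5, 2) >> 1 = 3 bits.
 */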
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Keep this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}
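/*
 * Sizing sketch for cma_activate_area() (hypothetical numbers, not from
 * this file): a 64 MiB area with 4 KiB pages and order_per_bit = 0 covers
 * 16384 pages, so cma_bitmap_maxno() is 16384 bits and the kzalloc() above
 * asks for BITS_TO_LONGS(16384) * sizeof(long) = 2048 bytes on a 64-bit
 * machine.
 */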
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved
 * memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
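/*
 * Usage sketch for cma_init_reserved_mem() (illustrative only; the helper
 * name and the range below are hypothetical, not from this file). A caller
 * that has already reserved a memblock region hands it over to CMA:
 *
 *	static int __init my_rmem_setup(void)	// hypothetical helper
 *	{
 *		phys_addr_t base = 0x40000000;	// assumed, already reserved
 *		phys_addr_t size = SZ_64M;	// via memblock_reserve()
 *		struct cma *cma;
 *		int ret;
 *
 *		ret = cma_init_reserved_mem(base, size, 0, &cma);
 *		if (ret)
 *			pr_err("cma handover failed: %d\n", ret);
 *		return ret;
 *	}
 *
 * Both @base and @size must already be aligned to the mm-core minimum,
 * PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order), or -EINVAL is returned.
 */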
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @fixed: If true, reserve the area at exactly @base rather than anywhere
 *         in the range.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base. If false,
 * reserve in the range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct-mapped memory, so retrieving its physical
	 * address isn't appropriate. But it is useful to check the physical
	 * address of the highmem boundary, so it's justifiable to derive it
	 * from high_memory. On x86 there is a validation check for this
	 * case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm. In that case a contiguous allocation could no longer be
	 * satisfied, which is not what we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its
	 * effective value will be the memblock end. Set it explicitly to
	 * simplify further checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same
		 * zone. If the requested region crosses the low/high memory
		 * boundary, try allocating from high memory first and fall
		 * back to low memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
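/*
 * Usage sketch for cma_declare_contiguous() (hypothetical arch setup code;
 * the names and sizes are assumptions, not from this file). An architecture
 * would typically call it from early init, after memblock is up but before
 * the page allocator takes over:
 *
 *	static struct cma *my_cma;		// hypothetical
 *
 *	void __init my_arch_reserve_cma(void)	// hypothetical
 *	{
 *		// 0 base/limit/alignment: let memblock pick a suitable spot
 *		if (cma_declare_contiguous(0, SZ_128M, 0, 0, 0, false,
 *					   &my_cma))
 *			pr_warn("CMA reservation failed\n");
 *	}
 *
 * The area is only reserved here; it becomes usable for cma_alloc() once
 * cma_init_reserved_areas() has run at core_initcall time.
 */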
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region
		 * for our exclusive use. If the migration fails we will take
		 * the lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
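/*
 * Usage sketch for cma_alloc() (hypothetical driver code; the names are
 * assumptions, not from this file). A caller holding a struct cma from one
 * of the reservation paths above might allocate a 1 MiB buffer with:
 *
 *	struct page *page;
 *	size_t count = SZ_1M >> PAGE_SHIFT;	// 256 pages with 4 KiB pages
 *
 *	page = cma_alloc(my_cma, count, 0);	// my_cma is hypothetical
 *	if (!page)
 *		return -ENOMEM;			// NULL, not ERR_PTR, on failure
 *
 * Note that cma_alloc() may block: alloc_contig_range() migrates any
 * movable pages currently occupying the requested range.
 */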
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
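/*
 * Matching release sketch (hypothetical, continuing the cma_alloc() example
 * above). @count must be the same page count that was allocated, since the
 * bitmap is cleared at cma_bitmap_pages_to_bits() granularity:
 *
 *	if (!cma_release(my_cma, page, count))	// my_cma is hypothetical
 *		pr_warn("pages were not part of this CMA area\n");
 */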