/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
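 *
 * Worked example (illustrative numbers, not taken from any in-tree user):
 * with order_per_bit = 0 and align_order = 8, cma_bitmap_aligned_mask()
 * yields 0xff, and for base_pfn = 0x2f480 this helper returns
 * 0x2f480 & 0xff = 0x80.  bitmap_find_next_zero_area_off() then only
 * picks bit numbers for which (bitmap_no + offset) is a multiple of 256,
 * so the resulting pfn, base_pfn + (bitmap_no << order_per_bit), ends up
 * aligned to 1 << align_order even though base_pfn itself is not.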
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
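 *
 * Typical callers are early, firmware- or arch-specific paths that have
 * already carved the range out of memblock, e.g. the device tree
 * "reserved-memory" handling.  An illustrative (not verbatim) call, where
 * the name and the order_per_bit of 0 are arbitrary example values:
 *
 *	memblock_reserve(base, size);
 *	err = cma_init_reserved_mem(base, size, 0, "rmem", &cma);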
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy algorithm.
	 * In that case you couldn't get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
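	/*
	 * Dump the free extents of the allocation bitmap, one
	 * "<nbits>@<bit offset>" term per run of clear bits.  With made-up
	 * numbers the output looks roughly like:
	 *
	 *	cma: number of available pages: 32@0+64@512=> 96 free of 1024 total pages
	 *
	 * i.e. two free runs of 32 and 64 bits starting at bit offsets 0 and
	 * 512 (bits equal pages when order_per_bit is 0).
	 */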
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during compaction
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
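 *
 * Illustrative use (the cma pointer, sizes and error handling below are
 * placeholders, not taken from an in-tree caller):
 *
 *	struct page *pages;
 *
 *	pages = cma_alloc(cma, 256, 8, GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	cma_release(cma, pages, 256);
 *
 * This asks for 256 pages (1 MiB with 4 KiB pages) aligned to a 256-page
 * boundary; note that @align is an order, not a page count.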
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}