// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;

phys_addr_t cma_get_base(const struct cma *cma)
{
        return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
        return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
        return cma->name;
}

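/*
 * Each bit in an area's allocation bitmap stands for 1 << order_per_bit
 * pages. For example, with order_per_bit == 2 one bit covers four pages,
 * a 64-page area needs a 16-bit bitmap, and a request for 5 pages is
 * rounded up to 2 bits (8 pages) by cma_bitmap_pages_to_bits(). The
 * helpers below convert page counts and alignments into that granularity.
 */
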
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
                                             unsigned int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
                                               unsigned int align_order)
{
        return (cma->base_pfn & ((1UL << align_order) - 1))
                >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
                                              unsigned long pages)
{
        return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                             unsigned long count)
{
        unsigned long bitmap_no, bitmap_count;
        unsigned long flags;

        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        spin_lock_irqsave(&cma->lock, flags);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
        spin_unlock_irqrestore(&cma->lock, flags);
}

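/*
 * Called for each registered area from cma_init_reserved_areas() at
 * core_initcall time, once the page allocator is up: allocate the
 * allocation bitmap, check that the whole range sits in a single zone and
 * hand its pageblocks over to the buddy allocator as CMA pageblocks. On
 * failure the pages are released to the buddy as ordinary memory and the
 * area is disabled by zeroing its count.
 */
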
static void __init cma_activate_area(struct cma *cma)
{
        unsigned long base_pfn = cma->base_pfn, pfn;
        struct zone *zone;

        cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
        if (!cma->bitmap)
                goto out_error;

        /*
         * alloc_contig_range() requires the pfn range specified to be in the
         * same zone. Simplify by forcing the entire CMA resv range to be in
         * the same zone.
         */
        WARN_ON_ONCE(!pfn_valid(base_pfn));
        zone = page_zone(pfn_to_page(base_pfn));
        for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
                WARN_ON_ONCE(!pfn_valid(pfn));
                if (page_zone(pfn_to_page(pfn)) != zone)
                        goto not_in_zone;
        }

        for (pfn = base_pfn; pfn < base_pfn + cma->count;
             pfn += pageblock_nr_pages)
                init_cma_reserved_pageblock(pfn_to_page(pfn));

        spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
        INIT_HLIST_HEAD(&cma->mem_head);
        spin_lock_init(&cma->mem_head_lock);
#endif

        return;

not_in_zone:
        bitmap_free(cma->bitmap);
out_error:
        /* Expose all pages to the buddy; they are useless for CMA. */
        for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
                free_reserved_page(pfn_to_page(pfn));
        totalcma_pages -= cma->count;
        cma->count = 0;
        pr_err("CMA area %s could not be activated\n", cma->name);
        return;
}

static int __init cma_init_reserved_areas(void)
{
        int i;

        for (i = 0; i < cma_area_count; i++)
                cma_activate_area(&cma_areas[i]);

        return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                 unsigned int order_per_bit,
                                 const char *name,
                                 struct cma **res_cma)
{
        struct cma *cma;

        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size || !memblock_is_region_reserved(base, size))
                return -EINVAL;

        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
                return -EINVAL;

        /* ensure minimal alignment required by mm core */
        if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
                return -EINVAL;

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma = &cma_areas[cma_area_count];

        if (name)
                snprintf(cma->name, CMA_MAX_NAME, "%s", name);
        else
                snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
        *res_cma = cma;
        cma_area_count++;
        totalcma_pages += (size / PAGE_SIZE);

        return 0;
}

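/*
 * Usage sketch (illustrative only, not part of this file): a driver or
 * arch setup path that has already carved out a suitably aligned,
 * memblock-reserved range can register it as a CMA area. "base", the
 * "example" name and the 16 MiB size below are made up for the example.
 *
 *      struct cma *cma;
 *      int ret;
 *
 *      // base was reserved earlier, e.g. via memblock_reserve(), and is
 *      // aligned to CMA_MIN_ALIGNMENT_BYTES, as is the size
 *      ret = cma_init_reserved_mem(base, 16 * SZ_1M, 0, "example", &cma);
 *      if (ret)
 *              return ret;     // -EINVAL or -ENOSPC on bad input
 *
 * The area only becomes allocatable after cma_init_reserved_areas() has
 * run at core_initcall time.
 */
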
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
                        phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, const char *name, struct cma **res_cma,
                        int nid)
{
        phys_addr_t memblock_end = memblock_end_of_DRAM();
        phys_addr_t highmem_start;
        int ret = 0;

        /*
         * We can't use __pa(high_memory) directly, since high_memory
         * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
         * complain. Find the boundary by adding one to the last valid
         * address.
         */
        highmem_start = __pa(high_memory - 1) + 1;
        pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
                __func__, &size, &base, &limit, &alignment);

        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        /* Sanitise input arguments. */
        alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
        if (fixed && base & (alignment - 1)) {
                ret = -EINVAL;
                pr_err("Region at %pa must be aligned to %pa bytes\n",
                        &base, &alignment);
                goto err;
        }
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        if (!base)
                fixed = false;

        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        /*
         * If allocating at a fixed base the requested region must not cross
         * the low/high memory boundary.
         */
        if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
                        &base, &highmem_start);
                goto err;
        }

        /*
         * If the limit is unspecified or above the memblock end, its effective
         * value will be the memblock end. Set it explicitly to simplify further
         * checks.
         */
        if (limit == 0 || limit > memblock_end)
                limit = memblock_end;

        if (base + size > limit) {
                ret = -EINVAL;
                pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
                        &size, &base, &limit);
                goto err;
        }

        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                phys_addr_t addr = 0;

                /*
                 * If there is enough memory, try a bottom-up allocation first.
                 * It will place the new cma area close to the start of the node
                 * and guarantee that the compaction is moving pages out of the
                 * cma area and not into it.
                 * Avoid using first 4GB to not interfere with constrained zones
                 * like DMA/DMA32.
                 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
                if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
                        memblock_set_bottom_up(true);
                        addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
                                                        limit, nid, true);
                        memblock_set_bottom_up(false);
                }
#endif

                /*
                 * All pages in the reserved area must come from the same zone.
                 * If the requested region crosses the low/high memory boundary,
                 * try allocating from high memory first and fall back to low
                 * memory in case of failure.
                 */
                if (!addr && base < highmem_start && limit > highmem_start) {
                        addr = memblock_alloc_range_nid(size, alignment,
                                        highmem_start, limit, nid, true);
                        limit = highmem_start;
                }

                if (!addr) {
                        addr = memblock_alloc_range_nid(size, alignment, base,
                                        limit, nid, true);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }

                /*
                 * kmemleak scans/reads tracked objects for pointers to other
                 * objects but this address isn't mapped and accessible
                 */
                kmemleak_ignore_phys(addr);
                base = addr;
        }

        ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
        if (ret)
                goto free_mem;

        pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
                &base);
        return 0;

free_mem:
        memblock_phys_free(base, size);
err:
        pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}

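/*
 * Unlike cma_init_reserved_mem(), which only registers a range the caller
 * already reserved, cma_declare_contiguous_nid() performs the memblock
 * reservation itself, so it must be called while memblock is still the
 * active allocator. A sketch of an early-boot caller (the variable and
 * area names are illustrative only):
 *
 *      static struct cma *example_cma;
 *
 *      // in arch/board init code, before the buddy allocator takes over
 *      ret = cma_declare_contiguous_nid(0, 32 * SZ_1M, 0, 0, 0, false,
 *                                       "example", &example_cma,
 *                                       NUMA_NO_NODE);
 *
 * Passing base == 0 and fixed == false lets memblock pick the placement,
 * using the fallback order implemented above.
 */
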
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
        unsigned long next_zero_bit, next_set_bit, nr_zero;
        unsigned long start = 0;
        unsigned long nr_part, nr_total = 0;
        unsigned long nbits = cma_bitmap_maxno(cma);

        spin_lock_irq(&cma->lock);
        pr_info("number of available pages: ");
        for (;;) {
                next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
                if (next_zero_bit >= nbits)
                        break;
                next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
                nr_zero = next_set_bit - next_zero_bit;
                nr_part = nr_zero << cma->order_per_bit;
                pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
                        next_zero_bit);
                nr_total += nr_part;
                start = next_zero_bit + nr_zero;
        }
        pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
        spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
                       unsigned int align, bool no_warn)
{
        unsigned long mask, offset;
        unsigned long pfn = -1;
        unsigned long start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
        unsigned long i;
        struct page *page = NULL;
        int ret = -ENOMEM;

        if (!cma || !cma->count || !cma->bitmap)
                goto out;

        pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                goto out;

        trace_cma_alloc_start(cma->name, count, align);

        mask = cma_bitmap_aligned_mask(cma, align);
        offset = cma_bitmap_aligned_offset(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        if (bitmap_count > bitmap_maxno)
                goto out;

        for (;;) {
                spin_lock_irq(&cma->lock);
                bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask,
                                offset);
                if (bitmap_no >= bitmap_maxno) {
                        spin_unlock_irq(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                spin_unlock_irq(&cma->lock);

                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
                                GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));

                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                }

                cma_clear_bitmap(cma, pfn, count);
                if (ret != -EBUSY)
                        break;

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));

                trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
                                           count, align);
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }

        trace_cma_alloc_finish(cma->name, pfn, page, count, align);

        /*
         * CMA can allocate multiple page blocks, which results in different
         * blocks being marked with different tags. Reset the tags to ignore
         * those page blocks.
         */
        if (page) {
                for (i = 0; i < count; i++)
                        page_kasan_tag_reset(page + i);
        }

        if (ret && !no_warn) {
                pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
                                   __func__, cma->name, count, ret);
                cma_debug_show_areas(cma);
        }

        pr_debug("%s(): returned %p\n", __func__, page);
out:
        if (page) {
                count_vm_event(CMA_ALLOC_SUCCESS);
                cma_sysfs_account_success_pages(cma, count);
        } else {
                count_vm_event(CMA_ALLOC_FAIL);
                if (cma)
                        cma_sysfs_account_fail_pages(cma, count);
        }

        return page;
}

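/*
 * Typical pairing of cma_alloc() with cma_release(), sketched for a
 * hypothetical caller (the cma pointer would come from one of the
 * registration helpers above and nr_pages is whatever the caller needs):
 *
 *      struct page *page;
 *
 *      page = cma_alloc(cma, nr_pages, 0, false);
 *      if (!page)
 *              return -ENOMEM;
 *      ...
 *      cma_release(cma, page, nr_pages);
 *
 * The align argument is an order: align == 4 requests a range aligned to
 * 16 pages.
 */

/**
 * cma_pages_valid() - check if the page range is within a CMA area
 * @cma: Contiguous memory region the range is checked against.
 * @pages: Starting page of the range.
 * @count: Number of pages in the range.
 *
 * Returns true if the start of the range lies inside @cma, false otherwise.
 * Used by cma_release() to validate its arguments.
 */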
bool cma_pages_valid(struct cma *cma, const struct page *pages,
                     unsigned long count)
{
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
                pr_debug("%s(page %p, count %lu)\n", __func__,
                         (void *)pages, count);
                return false;
        }

        return true;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
                 unsigned long count)
{
        unsigned long pfn;

        if (!cma_pages_valid(cma, pages, count))
                return false;

        pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

        pfn = page_to_pfn(pages);

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);
        trace_cma_release(cma->name, pfn, pages, count);

        return true;
}

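/**
 * cma_for_each_area() - iterate over all registered CMA areas
 * @it: Callback invoked for each area; returning non-zero stops the walk.
 * @data: Opaque pointer passed through to @it.
 *
 * Returns 0 once every area has been visited, or the first non-zero value
 * returned by @it.
 */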
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = it(&cma_areas[i], data);

                if (ret)
                        return ret;
        }

        return 0;
}

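/*
 * Illustrative cma_for_each_area() callback (not part of this file): sum
 * up the size of every registered area into an unsigned long passed via
 * the data cookie.
 *
 *      static int example_count_cma(struct cma *cma, void *data)
 *      {
 *              *(unsigned long *)data += cma_get_size(cma);
 *              return 0;
 *      }
 *
 *      unsigned long total = 0;
 *      cma_for_each_area(example_count_cma, &total);
 */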