// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
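
/*
 * Worked example for the three helpers above (illustrative values only,
 * not taken from any real platform): assume an area with base_pfn 0x10004
 * and order_per_bit 0, and a request with align_order 4 for 32 pages.
 *
 *   cma_bitmap_aligned_mask()   -> (1UL << (4 - 0)) - 1 = 0xf, the low
 *                                  bits that must be zero in an aligned
 *                                  bitmap position.
 *   cma_bitmap_aligned_offset() -> (0x10004 & 0xf) >> 0 = 4, i.e. base_pfn
 *                                  sits 4 pages into its 16-page alignment
 *                                  block; cma_alloc() feeds this offset to
 *                                  bitmap_find_next_zero_area_off() so that
 *                                  bitmap positions map to aligned PFNs.
 *   cma_bitmap_pages_to_bits()  -> ALIGN(32, 1) >> 0 = 32 bits.
 *
 * With order_per_bit > 0 each bit covers 1 << order_per_bit pages, and all
 * three values shrink accordingly.
 */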

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
		free_reserved_page(pfn_to_page(pfn));
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
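
/*
 * A typical caller is early-boot code that has already carved the range out
 * of memblock. For instance, the "reusable" reserved-memory handling in
 * kernel/dma/contiguous.c does roughly the following (simplified sketch,
 * not a verbatim copy):
 *
 *	struct cma *cma;
 *	int err;
 *
 *	err = cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *				    rmem->name, &cma);
 *	if (err)
 *		return err;
 *
 * where rmem describes a devicetree node compatible with "shared-dma-pool".
 */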

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pageblocks by the page allocator's buddy
	 * algorithm. In that case you could not get contiguous memory there,
	 * which is not what we want.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));

		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
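
/*
 * Putting cma_alloc() and cma_release() (below) together, a driver that
 * owns a CMA area typically follows this pattern (sketch only; "my_cma"
 * and the surrounding error handling are hypothetical):
 *
 *	unsigned long nr_pages = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, nr_pages, get_order(SZ_1M), false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(my_cma, page, nr_pages);
 *
 * The @align argument is an order in PAGE_SIZE units, so get_order() of the
 * buffer size yields a buffer that is naturally aligned to its own size.
 */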

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
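
/*
 * cma_for_each_area() is the intended way for other subsystems to walk the
 * registered areas. A minimal (hypothetical) callback that sums up the pages
 * reserved across all areas could look like this:
 *
 *	static int count_cma_pages(struct cma *cma, void *data)
 *	{
 *		*(unsigned long *)data += cma_get_size(cma) >> PAGE_SHIFT;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	cma_for_each_area(count_cma_pages, &total);
 *
 * A non-zero return value from the callback stops the walk and is propagated
 * back to the caller of cma_for_each_area().
 */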