Lines matching full:cma in mm/cma.c (each match shows its source line number, the code, and the enclosing function)

15 #define pr_fmt(fmt) "cma: " fmt
30 #include <linux/cma.h>
34 #include <trace/events/cma.h>
37 #include "cma.h"
39 struct cma cma_areas[MAX_CMA_AREAS];
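For context, the fields dereferenced by the matches below suggest the shape of struct cma, which is defined in the local mm/cma.h included above. This is a rough sketch reconstructed only from those uses, not the authoritative definition (the real struct carries additional CONFIG-dependent members):

struct cma {
        unsigned long           base_pfn;       /* first page frame of the area */
        unsigned long           count;          /* area size in pages */
        unsigned long           *bitmap;        /* one bit per 2^order_per_bit pages */
        unsigned int            order_per_bit;  /* allocation granularity */
        spinlock_t              lock;           /* protects the bitmap */
#ifdef CONFIG_CMA_DEBUGFS
        struct hlist_head       mem_head;
        spinlock_t              mem_head_lock;
#endif
        char                    name[CMA_MAX_NAME];
        bool                    reserve_pages_on_error;
};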
43 phys_addr_t cma_get_base(const struct cma *cma) in cma_get_base() argument
45 return PFN_PHYS(cma->base_pfn); in cma_get_base()
48 unsigned long cma_get_size(const struct cma *cma) in cma_get_size() argument
50 return cma->count << PAGE_SHIFT; in cma_get_size()
53 const char *cma_get_name(const struct cma *cma) in cma_get_name() argument
55 return cma->name; in cma_get_name()
58 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, in cma_bitmap_aligned_mask() argument
61 if (align_order <= cma->order_per_bit) in cma_bitmap_aligned_mask()
63 return (1UL << (align_order - cma->order_per_bit)) - 1; in cma_bitmap_aligned_mask()
70 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, in cma_bitmap_aligned_offset() argument
73 return (cma->base_pfn & ((1UL << align_order) - 1)) in cma_bitmap_aligned_offset()
74 >> cma->order_per_bit; in cma_bitmap_aligned_offset()
77 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, in cma_bitmap_pages_to_bits() argument
80 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
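As a worked example of the three helpers above (illustrative numbers only): with order_per_bit = 2, each bitmap bit covers 4 pages. cma_bitmap_aligned_mask() for align_order = 4 returns (1UL << (4 - 2)) - 1 = 3; cma_bitmap_aligned_offset() for a base_pfn of 0x12345 returns (0x12345 & 0xf) >> 2 = 1; and cma_bitmap_pages_to_bits() for a 10-page request returns ALIGN(10, 4) >> 2 = 3 bits.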
83 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, in cma_clear_bitmap() argument
89 bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
90 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_clear_bitmap()
92 spin_lock_irqsave(&cma->lock, flags); in cma_clear_bitmap()
93 bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); in cma_clear_bitmap()
94 spin_unlock_irqrestore(&cma->lock, flags); in cma_clear_bitmap()
97 static void __init cma_activate_area(struct cma *cma) in cma_activate_area() argument
99 unsigned long base_pfn = cma->base_pfn, pfn; in cma_activate_area()
102 cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); in cma_activate_area()
103 if (!cma->bitmap) in cma_activate_area()
108 * same zone. Simplify by forcing the entire CMA resv range to be in the in cma_activate_area()
113 for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { in cma_activate_area()
119 for (pfn = base_pfn; pfn < base_pfn + cma->count; in cma_activate_area()
123 spin_lock_init(&cma->lock); in cma_activate_area()
126 INIT_HLIST_HEAD(&cma->mem_head); in cma_activate_area()
127 spin_lock_init(&cma->mem_head_lock); in cma_activate_area()
133 bitmap_free(cma->bitmap); in cma_activate_area()
135 /* Expose all pages to the buddy, they are useless for CMA. */ in cma_activate_area()
136 if (!cma->reserve_pages_on_error) { in cma_activate_area()
137 for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++) in cma_activate_area()
140 totalcma_pages -= cma->count; in cma_activate_area()
141 cma->count = 0; in cma_activate_area()
142 pr_err("CMA area %s could not be activated\n", cma->name); in cma_activate_area()
157 void __init cma_reserve_pages_on_error(struct cma *cma) in cma_reserve_pages_on_error() argument
159 cma->reserve_pages_on_error = true; in cma_reserve_pages_on_error()
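cma_reserve_pages_on_error() is an opt-out for the activation error path shown above: instead of handing the range back to the buddy allocator, the pages stay reserved. A minimal, hypothetical early-init sketch (my_cma and my_platform_reserve_on_error are illustrative names; the area is assumed to have been created earlier in boot, and the prototype comes from <linux/cma.h>):

static struct cma *my_cma;      /* set up by cma_declare_contiguous() elsewhere */

static void __init my_platform_reserve_on_error(void)
{
        /* Keep the range reserved even if cma_activate_area() fails. */
        if (my_cma)
                cma_reserve_pages_on_error(my_cma);
}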
170 * @res_cma: Pointer to store the created cma region.
177 struct cma **res_cma) in cma_init_reserved_mem()
179 struct cma *cma; in cma_init_reserved_mem() local
183 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_init_reserved_mem()
198 cma = &cma_areas[cma_area_count]; in cma_init_reserved_mem()
201 snprintf(cma->name, CMA_MAX_NAME, name); in cma_init_reserved_mem()
203 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); in cma_init_reserved_mem()
205 cma->base_pfn = PFN_DOWN(base); in cma_init_reserved_mem()
206 cma->count = size >> PAGE_SHIFT; in cma_init_reserved_mem()
207 cma->order_per_bit = order_per_bit; in cma_init_reserved_mem()
208 *res_cma = cma; in cma_init_reserved_mem()
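A hedged sketch of how an already-reserved range is typically handed to cma_init_reserved_mem(), loosely modelled on the reserved-memory path in kernel/dma/contiguous.c (demo_cma, demo_cma_setup and the "demo" name are illustrative; base and size must be page aligned and come from a prior memblock reservation):

static struct cma *demo_cma;

static int __init demo_cma_setup(phys_addr_t base, phys_addr_t size)
{
        int ret;

        /* order_per_bit = 0: track the area at single-page granularity. */
        ret = cma_init_reserved_mem(base, size, 0, "demo", &demo_cma);
        if (ret)
                pr_err("demo: cma_init_reserved_mem() failed: %d\n", ret);
        return ret;
}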
220 * @alignment: Alignment for the CMA area, should be power of 2 or zero
224 * @res_cma: Pointer to store the created cma region.
238 bool fixed, const char *name, struct cma **res_cma, in cma_declare_contiguous_nid()
256 pr_err("Not enough slots for CMA reserved regions!\n"); in cma_declare_contiguous_nid()
326 * It will place the new cma area close to the start of the node in cma_declare_contiguous_nid()
328 * cma area and not into it. in cma_declare_contiguous_nid()
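cma_declare_contiguous_nid() both reserves the memory through memblock and registers the area; most callers go through the cma_declare_contiguous() wrapper in <linux/cma.h>, which passes NUMA_NO_NODE. A minimal early-boot sketch with illustrative names (demo_cma, demo_declare); base = 0, limit = 0 and fixed = false let memblock choose the placement:

static struct cma *demo_cma;

static void __init demo_declare(phys_addr_t size)
{
        int ret;

        ret = cma_declare_contiguous(0, size, 0, 0, 0, false,
                                     "demo", &demo_cma);
        if (ret)
                pr_warn("demo: failed to reserve %pa bytes of CMA: %d\n",
                        &size, ret);
}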
387 static void cma_debug_show_areas(struct cma *cma) in cma_debug_show_areas() argument
392 unsigned long nbits = cma_bitmap_maxno(cma); in cma_debug_show_areas()
394 spin_lock_irq(&cma->lock); in cma_debug_show_areas()
397 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); in cma_debug_show_areas()
400 next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); in cma_debug_show_areas()
402 nr_part = nr_zero << cma->order_per_bit; in cma_debug_show_areas()
408 pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); in cma_debug_show_areas()
409 spin_unlock_irq(&cma->lock); in cma_debug_show_areas()
412 static inline void cma_debug_show_areas(struct cma *cma) { } in cma_debug_show_areas() argument
417 * @cma: Contiguous memory region for which the allocation is performed.
425 struct page *cma_alloc(struct cma *cma, unsigned long count, in cma_alloc() argument
436 if (!cma || !cma->count || !cma->bitmap) in cma_alloc()
439 pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, in cma_alloc()
440 (void *)cma, cma->name, count, align); in cma_alloc()
445 trace_cma_alloc_start(cma->name, count, align); in cma_alloc()
447 mask = cma_bitmap_aligned_mask(cma, align); in cma_alloc()
448 offset = cma_bitmap_aligned_offset(cma, align); in cma_alloc()
449 bitmap_maxno = cma_bitmap_maxno(cma); in cma_alloc()
450 bitmap_count = cma_bitmap_pages_to_bits(cma, count); in cma_alloc()
456 spin_lock_irq(&cma->lock); in cma_alloc()
457 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, in cma_alloc()
461 spin_unlock_irq(&cma->lock); in cma_alloc()
464 bitmap_set(cma->bitmap, bitmap_no, bitmap_count); in cma_alloc()
470 spin_unlock_irq(&cma->lock); in cma_alloc()
472 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); in cma_alloc()
482 cma_clear_bitmap(cma, pfn, count); in cma_alloc()
489 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), in cma_alloc()
495 trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret); in cma_alloc()
498 * CMA can allocate multiple page blocks, which results in different in cma_alloc()
509 __func__, cma->name, count, ret); in cma_alloc()
510 cma_debug_show_areas(cma); in cma_alloc()
517 cma_sysfs_account_success_pages(cma, count); in cma_alloc()
520 if (cma) in cma_alloc()
521 cma_sysfs_account_fail_pages(cma, count); in cma_alloc()
527 bool cma_pages_valid(struct cma *cma, const struct page *pages, in cma_pages_valid() argument
532 if (!cma || !pages) in cma_pages_valid()
537 if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) { in cma_pages_valid()
548 * @cma: Contiguous memory region for which the allocation is performed.
556 bool cma_release(struct cma *cma, const struct page *pages, in cma_release() argument
561 if (!cma_pages_valid(cma, pages, count)) in cma_release()
568 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); in cma_release()
571 cma_clear_bitmap(cma, pfn, count); in cma_release()
572 trace_cma_release(cma->name, pfn, pages, count); in cma_release()
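Taken together, cma_alloc(), cma_pages_valid() and cma_release() form the runtime API of this file. A hypothetical driver-side sketch (the demo-prefixed names are illustrative; demo_cma is assumed to have been registered at boot as in the earlier examples):

static struct page *demo_buf;

static int demo_grab(struct cma *demo_cma, unsigned long nr_pages)
{
        /* align = 0: no alignment beyond a single page; no_warn = false. */
        demo_buf = cma_alloc(demo_cma, nr_pages, 0, false);
        return demo_buf ? 0 : -ENOMEM;
}

static void demo_put(struct cma *demo_cma, unsigned long nr_pages)
{
        /* cma_release() returns false if the pages are not from this area. */
        if (demo_buf && cma_release(demo_cma, demo_buf, nr_pages))
                demo_buf = NULL;
}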
577 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) in cma_for_each_area() argument
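Finally, cma_for_each_area() walks every registered area with a caller-supplied callback, stopping early if the callback returns non-zero. A small illustrative walker built on the accessors from the top of the listing (demo_print_area is a made-up name):

static int demo_print_area(struct cma *cma, void *data)
{
        phys_addr_t base = cma_get_base(cma);

        pr_info("CMA area %s: base %pa, %lu bytes\n",
                cma_get_name(cma), &base, cma_get_size(cma));
        return 0;       /* non-zero would stop the walk */
}

A caller would invoke it as cma_for_each_area(demo_print_area, NULL), e.g. from an initcall.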