cma.c: 664b0bae0b87f69bc9deb098f5e0158b9cf18e04 (old) vs. e8b098fc5747a7c871f113c9eb65453cc2d86e6f (new)

Lines prefixed with "+" below are present only in the newer revision, which adds the missing kernel-doc parameter descriptions; all other lines shown are identical in both revisions.
 /*
  * Contiguous Memory Allocator
  *
  * Copyright (c) 2010-2011 by Samsung Electronics.
  * Copyright IBM Corporation, 2013
  * Copyright LG Electronics Inc., 2014
  * Written by:
  *	Marek Szyprowski <m.szyprowski@samsung.com>

--- 151 unchanged lines hidden (view full) ---

 }
 core_initcall(cma_init_reserved_areas);

 /**
  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
  * @base: Base address of the reserved area
  * @size: Size of the reserved area (in bytes),
  * @order_per_bit: Order of pages represented by one bit on bitmap.
+ * @name: The name of the area. If this parameter is NULL, the name of
+ *        the area will be set to "cmaN", where N is a running counter of
+ *        used areas.
  * @res_cma: Pointer to store the created cma region.
  *
  * This function creates custom contiguous area from already reserved memory.
  */
 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 				 unsigned int order_per_bit,
 				 const char *name,
 				 struct cma **res_cma)

--- 46 unchanged lines hidden (view full) ---
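For orientation, here is a minimal usage sketch of cma_init_reserved_mem() as documented in the hunk above: an early-init hook that hands an already-reserved physical range to CMA. The function name fb_rmem_to_cma, the fb_cma variable and the choice of a NULL name are illustrative assumptions, not code from the tree; the real in-tree user of this pattern is rmem_cma_setup() in the DMA contiguous setup code.

```c
/*
 * Hedged sketch only: hypothetical early-init code that hands an
 * already-reserved memblock range to CMA. All names are made up.
 */
#include <linux/cma.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/types.h>

static struct cma *fb_cma;

static int __init fb_rmem_to_cma(phys_addr_t base, phys_addr_t size)
{
	int err;

	/*
	 * order_per_bit = 0: one bitmap bit per page.
	 * name = NULL: the area is auto-named "cmaN" (see @name above).
	 */
	err = cma_init_reserved_mem(base, size, 0, NULL, &fb_cma);
	if (err)
		pr_err("fb: cma_init_reserved_mem failed: %d\n", err);

	return err;
}
```

Note that this call only registers the area; the per-area bitmap is set up later by cma_init_reserved_areas(), the core_initcall visible at the top of the hunk.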
 /**
  * cma_declare_contiguous() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
  * @limit: End address of the reserved memory (optional, 0 for any).
  * @alignment: Alignment for the CMA area, should be power of 2 or zero
  * @order_per_bit: Order of pages represented by one bit on bitmap.
  * @fixed: hint about where to place the reserved area
+ * @name: The name of the area. See function cma_init_reserved_mem()
  * @res_cma: Pointer to store the created cma region.
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
  * has been activated and all other subsystems have already allocated/reserved
  * memory. This function allows to create custom reserved areas.
  *
  * If @fixed is true, reserve contiguous area at exactly @base. If false,

--- 147 unchanged lines hidden (view full) ---
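A hedged sketch of the reservation path described above, as it might be called from arch-specific early setup code once memblock is up. The "camera" area, the 16 MiB size and the function name are illustrative assumptions, not taken from the tree.

```c
/*
 * Hedged sketch: reserving a named CMA area during early boot.
 * All names and sizes here are illustrative assumptions.
 */
#include <linux/cma.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *camera_cma;

void __init camera_reserve_cma(void)
{
	int ret;

	ret = cma_declare_contiguous(0,        /* base: 0 = any suitable address */
				     SZ_16M,   /* size of the area */
				     0,        /* limit: 0 = no upper bound */
				     0,        /* alignment: use CMA's minimum */
				     0,        /* order_per_bit: 1 bit per page */
				     false,    /* fixed: base is not mandatory */
				     "camera", /* @name, see kernel-doc above */
				     &camera_cma);
	if (ret)
		pr_err("camera: CMA reservation failed: %d\n", ret);
}
```

With @fixed false, @base is only a hint; passing a non-zero @base together with @fixed true instead fails the reservation if that exact range cannot be obtained.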
 static inline void cma_debug_show_areas(struct cma *cma) { }
 #endif

 /**
  * cma_alloc() - allocate pages from contiguous area
  * @cma: Contiguous memory region for which the allocation is performed.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @gfp_mask: GFP mask to use during compaction
  *
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
 struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		       gfp_t gfp_mask)
 {
 	unsigned long mask, offset;

--- 118 unchanged lines hidden ---
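Finally, a hedged sketch of runtime allocation from a CMA area through cma_alloc() and cma_release(), illustrating @count, @align and the @gfp_mask parameter documented above. The helper names and the camera_cma area are assumptions carried over from the earlier sketches.

```c
/*
 * Hedged sketch: runtime allocation and release of contiguous pages
 * from a CMA area. Helper names are hypothetical.
 */
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>

static struct page *camera_grab_frames(struct cma *cma, size_t nr_pages)
{
	/*
	 * align = 0: no alignment beyond a single page.
	 * GFP_KERNEL: the allocation may sleep while pages inside the
	 * chosen range are migrated/compacted out of the way.
	 */
	return cma_alloc(cma, nr_pages, 0, GFP_KERNEL);
}

static void camera_put_frames(struct cma *cma, struct page *pages,
			      unsigned int nr_pages)
{
	if (pages)
		cma_release(cma, pages, nr_pages);
}
```

cma_alloc() returns NULL on failure, so callers must check the result; the release side must pass back the same page pointer and page count that were allocated.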