Lines matching "reserved-memory" in U-Boot's lib/lmb.c, the logical memory block (LMB) allocator

// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 */
/* From lmb_dump_all(): print both tables (compiled in when DEBUG is set) */
	debug("    memory.cnt = 0x%lx\n", lmb->memory.cnt);
	debug("    memory.size = 0x%llx\n",
	      (unsigned long long)lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
		debug("    memory.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->memory.region[i].base);
		debug("            .size = 0x%llx\n",
		      (unsigned long long)lmb->memory.region[i].size);
	}

	debug("\n    reserved.cnt = 0x%lx\n",
	      lmb->reserved.cnt);
	debug("    reserved.size = 0x%llx\n",
	      (unsigned long long)lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		debug("    reserved.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->reserved.region[i].base);
		debug("              .size = 0x%llx\n",
		      (unsigned long long)lmb->reserved.region[i].size);
	}
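Every fragment on this page walks the same two tables. A minimal sketch of the data model consistent with these dumps; the field names follow the code above, while the typedefs and the table size are illustrative stand-ins:

#include <stdint.h>

typedef uint64_t phys_addr_t;	/* stand-in: the real typedefs are per-arch */
typedef uint64_t phys_size_t;

#define MAX_LMB_REGIONS	8	/* illustrative fixed table size */

struct lmb_property {
	phys_addr_t base;	/* first byte of the range */
	phys_size_t size;	/* length in bytes */
};

struct lmb_region {
	unsigned long cnt;	/* valid entries in region[] */
	phys_size_t size;	/* total bytes tracked by the table */
	struct lmb_property region[MAX_LMB_REGIONS];
};

struct lmb {
	struct lmb_region memory;	/* all RAM known to the allocator */
	struct lmb_region reserved;	/* ranges that must not be handed out */
};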
/* From lmb_addrs_overlap(): inclusive end addresses of both candidate ranges */
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

/* From lmb_addrs_adjacent(): region 2 sits immediately below region 1 */
		return -1;
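Both predicates are short enough to reconstruct whole. A sketch consistent with the fragments above (note the inclusive-end arithmetic, so a one-byte range has base == end):

/* Nonzero when [base1, base1_end] and [base2, base2_end] share any byte */
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return (base1 <= base2_end) && (base2 <= base1_end);
}

/*
 * Returns 1 if region 2 starts exactly where region 1 ends,
 * -1 if region 1 starts exactly where region 2 ends,
 * 0 otherwise (gap or overlap between the two)
 */
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}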
/* From lmb_regions_adjacent(): load both entries, reuse the address check */
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;
/* From lmb_remove_region(): close the gap by copying later entries down */
	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
/* From lmb_coalesce_regions(): grow r1 by r2, then remove entry r2 */
	rgn->region[r1].size += rgn->region[r2].size;
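The two table-maintenance helpers reconstructed whole as a sketch; the elided last step of the coalesce fragment is the lmb_remove_region() call:

/* Drop entry r: shift every later entry down one slot */
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Merge entry r2 into the lower-addressed entry r1 (assumes r1 < r2) */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}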
/* From lmb_init(): both tables start out empty */
	lmb->memory.cnt = 0;
	lmb->memory.size = 0;
	lmb->reserved.cnt = 0;
	lmb->reserved.size = 0;
/* Initialize the struct, add memory and call arch/board reserve functions */
/* From lmb_init_and_reserve(): register each populated DRAM bank ... */
	if (bd->bi_dram[i].size) {
		lmb_add(lmb, bd->bi_dram[i].start,
			bd->bi_dram[i].size);
	}
/* ... or, on boards without a bank array, the single legacy range */
	if (bd->bi_memsize)
		lmb_add(lmb, bd->bi_memstart, bd->bi_memsize);
/* Initialize the struct, add memory and call arch/board reserve functions */
/* (second copy of the comment, heading lmb_init_and_reserve_range(), the single-range variant) */
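A sketch of how U-Boot's boot path typically drives this initialization (gd is U-Boot's global data pointer; an illustration, not the exact call site):

	struct lmb lmb;

	/* register all DRAM banks, then apply arch/board/fdt reservations */
	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
	lmb_dump_all(&lmb);	/* prints both tables when DEBUG is enabled */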
/* From lmb_add_region(): an empty table simply takes the new range */
	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->cnt = 1;
		return 0;
	}

	/* First pass: try to coalesce the new range with an existing entry */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		/* adjacent just below entry i: extend it downwards ... */
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
		/* ... adjacent just above entry i: extend it upwards ... */
			rgn->region[i].size += size;
		/* ... partially overlapping an entry: reject */
			return -1;
	}

	/* Growing entry i may have closed the gap to entry i + 1 */
	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1))
		lmb_coalesce_regions(rgn, i, i + 1);

	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce: insert, keeping the table sorted by base */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	/* New lowest base: the shift loop never writes slot 0 itself */
	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;
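The fragments cover most of the insertion routine; here it is end to end as a sketch consistent with them. The duplicate-range early return and the coalesced counter are restored from context and may differ slightly from the exact revision this page indexed:

static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if (rgnbase == base && rgnsize == size)
			return 0;	/* already have this exact region */

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {		/* new range just below i */
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {	/* new range just above i */
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			return -1;		/* partial overlap: reject */
		}
	}

	/* Growing entry i may have closed the gap to entry i + 1 */
	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;	/* table full */

	/* Couldn't coalesce: insert while keeping the table sorted by base */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	/* New lowest base: the shift loop never writes slot 0 itself */
	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}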
/* From lmb_add(): available RAM goes into the memory table */
	struct lmb_region *_rgn = &(lmb->memory);
/* From lmb_free(): carve (base, size) back out of the reserved table */
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t end = base + size - 1;

	/* Find the reserved entry that fully contains [base, end] */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;
		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}
	if (i == rgn->cnt)
		return -1;	/* no entry contains the range */

	/* Freed range at the entry's front: shrink it from below ... */
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
	/* ... or at the entry's tail: shrink it from above */
		rgn->region[i].size -= size;

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end + 1, rgnend - end);
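A free can take four shapes; the fragments show three of them, and the exact-match case, which removes the whole entry, is restored from context. Reconstructed as one sketch:

long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0;

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Case 1: freeing the entire entry */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Case 2: freed range at the front, shrink from below */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Case 3: freed range at the tail, shrink from above */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/* Case 4: hole in the middle, split into two entries */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end + 1, rgnend - end);
}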
/* From lmb_reserve(): reservations reuse the same insertion helper */
	struct lmb_region *_rgn = &(lmb->reserved);
/* From lmb_overlaps_region(): index of the first overlapping entry, or -1 */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}
	return (i < rgn->cnt) ? i : -1;
/* From lmb_align_down(): size must be a power of two for the mask to work */
	return addr & ~(size - 1);
/* From __lmb_alloc_base(): scan the RAM banks from highest to lowest */
	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		/* no limit: candidate is the topmost aligned fit in the bank */
		base = lmb_align_down(lmbbase + lmbsize - size, align);
		/* capped: clamp bank end to max_addr, -1 flags wraparound */
			base = -1;
		base = lmb_align_down(base - size, align);

		/* walk downwards below each reserved range that collides */
		rgn = lmb_overlaps_region(&lmb->reserved, base, size);
		/* This area isn't reserved, take it */
		if (lmb_add_region(&lmb->reserved, base,
				   size) < 0)
			return 0;
		return base;

		res_base = lmb->reserved.region[rgn].base;
		base = lmb_align_down(res_base - size, align);
	}
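The top-down allocation loop reconstructed whole, as a sketch consistent with the fragments above. LMB_ALLOC_ANYWHERE is the conventional 0 sentinel meaning no upper bound; min() and ulong are the usual U-Boot kernel.h helpers:

#define LMB_ALLOC_ANYWHERE	0

phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	/* Prefer high addresses: scan banks from the top of the table down */
	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE) {
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		} else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)	/* bank end wrapped around */
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else {
			continue;
		}

		/* Slide downwards below each reserved range that overlaps */
		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}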
/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
/* From lmb_alloc_addr(): */
	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
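A usage sketch: a fixed load address either comes back verbatim or the caller falls back to a top-down allocation. The address is made up for illustration; lmb_alloc() is defined in the same file but not among the matched lines, and SZ_* come from linux/sizes.h:

	phys_addr_t want = 0x80200000;	/* hypothetical kernel load address */

	if (lmb_alloc_addr(&lmb, want, SZ_16M) == want)
		printf("reserved 16 MiB at %llx\n", (unsigned long long)want);
	else
		want = lmb_alloc(&lmb, SZ_16M, SZ_2M);	/* fall back: top-down */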
/* From lmb_get_free_size(): bytes free at addr before the next reservation */
	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
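This scan only works because lmb_add_region() keeps the reserved table sorted by base address. A usage sketch, with a hypothetical candidate address and threshold:

	phys_addr_t buf = 0x82000000;	/* hypothetical scratch address */
	phys_size_t room = lmb_get_free_size(&lmb, buf);

	if (room >= SZ_1M)
		lmb_reserve(&lmb, buf, SZ_1M);	/* enough space: claim 1 MiB */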
/* From lmb_is_reserved(): linear scan of the reserved table */
	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
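Tying the page together, a smoke test of the API surface shown above. Addresses are hypothetical, and the types and helpers are those sketched earlier; lmb_alloc() wraps __lmb_alloc_base() with no upper bound:

#include <assert.h>

static void lmb_smoke_test(void)
{
	struct lmb lmb;

	lmb_init(&lmb);
	lmb_add(&lmb, 0x80000000, 0x40000000);		/* 1 GiB of RAM */
	lmb_reserve(&lmb, 0x80000000, 0x00100000);	/* protect first 1 MiB */

	/* top-down allocation lands just below the end of RAM */
	phys_addr_t a = lmb_alloc(&lmb, 0x2000, 0x1000);
	assert(a && !(a & 0xfff));		/* got an aligned address */
	assert(lmb_is_reserved(&lmb, a));	/* now in the reserved table */

	lmb_free(&lmb, a, 0x2000);
	assert(!lmb_is_reserved(&lmb, a));	/* entry removed again */
}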