1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Procedures for maintaining information about logical memory blocks. 4 * 5 * Peter Bergner, IBM Corp. June 2001. 6 * Copyright (C) 2001 Peter Bergner. 7 */ 8 9 #include <common.h> 10 #include <lmb.h> 11 12 #define LMB_ALLOC_ANYWHERE 0 13 14 void lmb_dump_all(struct lmb *lmb) 15 { 16 #ifdef DEBUG 17 unsigned long i; 18 19 debug("lmb_dump_all:\n"); 20 debug(" memory.cnt = 0x%lx\n", lmb->memory.cnt); 21 debug(" memory.size = 0x%llx\n", 22 (unsigned long long)lmb->memory.size); 23 for (i=0; i < lmb->memory.cnt ;i++) { 24 debug(" memory.reg[0x%lx].base = 0x%llx\n", i, 25 (long long unsigned)lmb->memory.region[i].base); 26 debug(" .size = 0x%llx\n", 27 (long long unsigned)lmb->memory.region[i].size); 28 } 29 30 debug("\n reserved.cnt = 0x%lx\n", 31 lmb->reserved.cnt); 32 debug(" reserved.size = 0x%llx\n", 33 (long long unsigned)lmb->reserved.size); 34 for (i=0; i < lmb->reserved.cnt ;i++) { 35 debug(" reserved.reg[0x%lx].base = 0x%llx\n", i, 36 (long long unsigned)lmb->reserved.region[i].base); 37 debug(" .size = 0x%llx\n", 38 (long long unsigned)lmb->reserved.region[i].size); 39 } 40 #endif /* DEBUG */ 41 } 42 43 static long lmb_addrs_overlap(phys_addr_t base1, 44 phys_size_t size1, phys_addr_t base2, phys_size_t size2) 45 { 46 const phys_addr_t base1_end = base1 + size1 - 1; 47 const phys_addr_t base2_end = base2 + size2 - 1; 48 49 return ((base1 <= base2_end) && (base2 <= base1_end)); 50 } 51 52 static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1, 53 phys_addr_t base2, phys_size_t size2) 54 { 55 if (base2 == base1 + size1) 56 return 1; 57 else if (base1 == base2 + size2) 58 return -1; 59 60 return 0; 61 } 62 63 static long lmb_regions_adjacent(struct lmb_region *rgn, 64 unsigned long r1, unsigned long r2) 65 { 66 phys_addr_t base1 = rgn->region[r1].base; 67 phys_size_t size1 = rgn->region[r1].size; 68 phys_addr_t base2 = rgn->region[r2].base; 69 phys_size_t size2 = rgn->region[r2].size; 70 71 return lmb_addrs_adjacent(base1, size1, base2, size2); 72 } 73 74 static void lmb_remove_region(struct lmb_region *rgn, unsigned long r) 75 { 76 unsigned long i; 77 78 for (i = r; i < rgn->cnt - 1; i++) { 79 rgn->region[i].base = rgn->region[i + 1].base; 80 rgn->region[i].size = rgn->region[i + 1].size; 81 } 82 rgn->cnt--; 83 } 84 85 /* Assumption: base addr of region 1 < base addr of region 2 */ 86 static void lmb_coalesce_regions(struct lmb_region *rgn, 87 unsigned long r1, unsigned long r2) 88 { 89 rgn->region[r1].size += rgn->region[r2].size; 90 lmb_remove_region(rgn, r2); 91 } 92 93 void lmb_init(struct lmb *lmb) 94 { 95 lmb->memory.cnt = 0; 96 lmb->memory.size = 0; 97 lmb->reserved.cnt = 0; 98 lmb->reserved.size = 0; 99 } 100 101 /* Initialize the struct, add memory and call arch/board reserve functions */ 102 void lmb_init_and_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size, 103 void *fdt_blob) 104 { 105 lmb_init(lmb); 106 lmb_add(lmb, base, size); 107 arch_lmb_reserve(lmb); 108 board_lmb_reserve(lmb); 109 110 if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob) 111 boot_fdt_add_mem_rsv_regions(lmb, fdt_blob); 112 } 113 114 /* This routine called with relocation disabled. */ 115 static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size) 116 { 117 unsigned long coalesced = 0; 118 long adjacent, i; 119 120 if (rgn->cnt == 0) { 121 rgn->region[0].base = base; 122 rgn->region[0].size = size; 123 rgn->cnt = 1; 124 return 0; 125 } 126 127 /* First try and coalesce this LMB with another. */ 128 for (i=0; i < rgn->cnt; i++) { 129 phys_addr_t rgnbase = rgn->region[i].base; 130 phys_size_t rgnsize = rgn->region[i].size; 131 132 if ((rgnbase == base) && (rgnsize == size)) 133 /* Already have this region, so we're done */ 134 return 0; 135 136 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize); 137 if ( adjacent > 0 ) { 138 rgn->region[i].base -= size; 139 rgn->region[i].size += size; 140 coalesced++; 141 break; 142 } 143 else if ( adjacent < 0 ) { 144 rgn->region[i].size += size; 145 coalesced++; 146 break; 147 } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) { 148 /* regions overlap */ 149 return -1; 150 } 151 } 152 153 if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) { 154 lmb_coalesce_regions(rgn, i, i+1); 155 coalesced++; 156 } 157 158 if (coalesced) 159 return coalesced; 160 if (rgn->cnt >= MAX_LMB_REGIONS) 161 return -1; 162 163 /* Couldn't coalesce the LMB, so add it to the sorted table. */ 164 for (i = rgn->cnt-1; i >= 0; i--) { 165 if (base < rgn->region[i].base) { 166 rgn->region[i+1].base = rgn->region[i].base; 167 rgn->region[i+1].size = rgn->region[i].size; 168 } else { 169 rgn->region[i+1].base = base; 170 rgn->region[i+1].size = size; 171 break; 172 } 173 } 174 175 if (base < rgn->region[0].base) { 176 rgn->region[0].base = base; 177 rgn->region[0].size = size; 178 } 179 180 rgn->cnt++; 181 182 return 0; 183 } 184 185 /* This routine may be called with relocation disabled. */ 186 long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size) 187 { 188 struct lmb_region *_rgn = &(lmb->memory); 189 190 return lmb_add_region(_rgn, base, size); 191 } 192 193 long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size) 194 { 195 struct lmb_region *rgn = &(lmb->reserved); 196 phys_addr_t rgnbegin, rgnend; 197 phys_addr_t end = base + size - 1; 198 int i; 199 200 rgnbegin = rgnend = 0; /* supress gcc warnings */ 201 202 /* Find the region where (base, size) belongs to */ 203 for (i=0; i < rgn->cnt; i++) { 204 rgnbegin = rgn->region[i].base; 205 rgnend = rgnbegin + rgn->region[i].size - 1; 206 207 if ((rgnbegin <= base) && (end <= rgnend)) 208 break; 209 } 210 211 /* Didn't find the region */ 212 if (i == rgn->cnt) 213 return -1; 214 215 /* Check to see if we are removing entire region */ 216 if ((rgnbegin == base) && (rgnend == end)) { 217 lmb_remove_region(rgn, i); 218 return 0; 219 } 220 221 /* Check to see if region is matching at the front */ 222 if (rgnbegin == base) { 223 rgn->region[i].base = end + 1; 224 rgn->region[i].size -= size; 225 return 0; 226 } 227 228 /* Check to see if the region is matching at the end */ 229 if (rgnend == end) { 230 rgn->region[i].size -= size; 231 return 0; 232 } 233 234 /* 235 * We need to split the entry - adjust the current one to the 236 * beginging of the hole and add the region after hole. 237 */ 238 rgn->region[i].size = base - rgn->region[i].base; 239 return lmb_add_region(rgn, end + 1, rgnend - end); 240 } 241 242 long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size) 243 { 244 struct lmb_region *_rgn = &(lmb->reserved); 245 246 return lmb_add_region(_rgn, base, size); 247 } 248 249 static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base, 250 phys_size_t size) 251 { 252 unsigned long i; 253 254 for (i=0; i < rgn->cnt; i++) { 255 phys_addr_t rgnbase = rgn->region[i].base; 256 phys_size_t rgnsize = rgn->region[i].size; 257 if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) { 258 break; 259 } 260 } 261 262 return (i < rgn->cnt) ? i : -1; 263 } 264 265 phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align) 266 { 267 return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE); 268 } 269 270 phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr) 271 { 272 phys_addr_t alloc; 273 274 alloc = __lmb_alloc_base(lmb, size, align, max_addr); 275 276 if (alloc == 0) 277 printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n", 278 (ulong)size, (ulong)max_addr); 279 280 return alloc; 281 } 282 283 static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size) 284 { 285 return addr & ~(size - 1); 286 } 287 288 phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr) 289 { 290 long i, j; 291 phys_addr_t base = 0; 292 phys_addr_t res_base; 293 294 for (i = lmb->memory.cnt-1; i >= 0; i--) { 295 phys_addr_t lmbbase = lmb->memory.region[i].base; 296 phys_size_t lmbsize = lmb->memory.region[i].size; 297 298 if (lmbsize < size) 299 continue; 300 if (max_addr == LMB_ALLOC_ANYWHERE) 301 base = lmb_align_down(lmbbase + lmbsize - size, align); 302 else if (lmbbase < max_addr) { 303 base = lmbbase + lmbsize; 304 if (base < lmbbase) 305 base = -1; 306 base = min(base, max_addr); 307 base = lmb_align_down(base - size, align); 308 } else 309 continue; 310 311 while (base && lmbbase <= base) { 312 j = lmb_overlaps_region(&lmb->reserved, base, size); 313 if (j < 0) { 314 /* This area isn't reserved, take it */ 315 if (lmb_add_region(&lmb->reserved, base, 316 size) < 0) 317 return 0; 318 return base; 319 } 320 res_base = lmb->reserved.region[j].base; 321 if (res_base < size) 322 break; 323 base = lmb_align_down(res_base - size, align); 324 } 325 } 326 return 0; 327 } 328 329 /* 330 * Try to allocate a specific address range: must be in defined memory but not 331 * reserved 332 */ 333 phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size) 334 { 335 long j; 336 337 /* Check if the requested address is in one of the memory regions */ 338 j = lmb_overlaps_region(&lmb->memory, base, size); 339 if (j >= 0) { 340 /* 341 * Check if the requested end address is in the same memory 342 * region we found. 343 */ 344 if (lmb_addrs_overlap(lmb->memory.region[j].base, 345 lmb->memory.region[j].size, base + size - 346 1, 1)) { 347 /* ok, reserve the memory */ 348 if (lmb_reserve(lmb, base, size) >= 0) 349 return base; 350 } 351 } 352 return 0; 353 } 354 355 /* Return number of bytes from a given address that are free */ 356 phys_size_t lmb_get_unreserved_size(struct lmb *lmb, phys_addr_t addr) 357 { 358 int i; 359 long j; 360 361 /* check if the requested address is in the memory regions */ 362 j = lmb_overlaps_region(&lmb->memory, addr, 1); 363 if (j >= 0) { 364 for (i = 0; i < lmb->reserved.cnt; i++) { 365 if (addr < lmb->reserved.region[i].base) { 366 /* first reserved range > requested address */ 367 return lmb->reserved.region[i].base - addr; 368 } 369 if (lmb->reserved.region[i].base + 370 lmb->reserved.region[i].size > addr) { 371 /* requested addr is in this reserved range */ 372 return 0; 373 } 374 } 375 /* if we come here: no reserved ranges above requested addr */ 376 return lmb->memory.region[lmb->memory.cnt - 1].base + 377 lmb->memory.region[lmb->memory.cnt - 1].size - addr; 378 } 379 return 0; 380 } 381 382 int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr) 383 { 384 int i; 385 386 for (i = 0; i < lmb->reserved.cnt; i++) { 387 phys_addr_t upper = lmb->reserved.region[i].base + 388 lmb->reserved.region[i].size - 1; 389 if ((addr >= lmb->reserved.region[i].base) && (addr <= upper)) 390 return 1; 391 } 392 return 0; 393 } 394 395 __weak void board_lmb_reserve(struct lmb *lmb) 396 { 397 /* please define platform specific board_lmb_reserve() */ 398 } 399 400 __weak void arch_lmb_reserve(struct lmb *lmb) 401 { 402 /* please define platform specific arch_lmb_reserve() */ 403 } 404