/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <lmb.h>

#define LMB_ALLOC_ANYWHERE	0

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	unsigned long i;

	debug("lmb_dump_all:\n");
	debug("    memory.cnt             = 0x%lx\n", lmb->memory.cnt);
	debug("    memory.size            = 0x%llx\n",
	      (unsigned long long)lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
		debug("    memory.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->memory.region[i].base);
		debug("                    .size = 0x%llx\n",
		      (unsigned long long)lmb->memory.region[i].size);
	}

	debug("\n    reserved.cnt          = 0x%lx\n", lmb->reserved.cnt);
	debug("    reserved.size           = 0x%llx\n",
	      (unsigned long long)lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		debug("    reserved.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->reserved.region[i].base);
		debug("                      .size = 0x%llx\n",
		      (unsigned long long)lmb->reserved.region[i].size);
	}
#endif /* DEBUG */
}

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

void lmb_init(struct lmb *lmb)
{
	/*
	 * Create a dummy zero-size LMB which will get coalesced away
	 * later. This simplifies the lmb_add() code below.
	 */
	lmb->memory.region[0].base = 0;
	lmb->memory.region[0].size = 0;
	lmb->memory.cnt = 1;
	lmb->memory.size = 0;

	/* Ditto. */
	lmb->reserved.region[0].base = 0;
	lmb->reserved.region[0].size = 0;
	lmb->reserved.cnt = 1;
	lmb->reserved.size = 0;
}
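
/*
 * Typical call sequence (an illustrative sketch only; the addresses
 * and sizes below are made up, not taken from any board):
 *
 *	struct lmb lmb;
 *
 *	lmb_init(&lmb);
 *	lmb_add(&lmb, 0x00000000, 0x40000000);     -- 1 GiB of RAM
 *	lmb_reserve(&lmb, 0x3ff00000, 0x00100000); -- top 1 MiB in use
 *	addr = lmb_alloc(&lmb, 0x2000, 0x1000);    -- 8 KiB, 4 KiB aligned
 *
 * Since the allocator searches downward from the top of memory, the
 * lmb_alloc() call above would return 0x3fefe000, just below the
 * reserved megabyte.
 */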

/* This routine is called with relocation disabled. */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if the region matches at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region matches at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}

static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}
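
/*
 * The allocators below walk the memory regions from the top down; for
 * each candidate base they step past any reserved range they collide
 * with (found via lmb_overlaps_region()) until a large-enough gap is
 * found. Worked example with hypothetical numbers: given memory
 * [0x0, 0x1000000) and a reservation at [0xfff000, 0x1000000), a
 * request for 0x1000 bytes with 0x1000 alignment first tries base
 * 0xfff000, collides with the reservation, retries at
 * lmb_align_down(0xfff000 - 0x1000, 0x1000) = 0xffe000, finds that
 * free, reserves it, and returns it.
 */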

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			   phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
{
	long i, j;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb->reserved, base, size);
			if (j < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   lmb_align_up(size,
								align)) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}
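
/*
 * Board and architecture ports override the weak stubs above to keep
 * ranges such as framebuffers or the relocated U-Boot image itself out
 * of the allocator's reach. A hypothetical sketch (FB_BASE and FB_SIZE
 * are made-up names, not real U-Boot symbols):
 *
 *	void board_lmb_reserve(struct lmb *lmb)
 *	{
 *		lmb_reserve(lmb, FB_BASE, FB_SIZE);
 *	}
 */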