1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * EFI application memory management 4 * 5 * Copyright (c) 2016 Alexander Graf 6 */ 7 8 #include <common.h> 9 #include <efi_loader.h> 10 #include <malloc.h> 11 #include <mapmem.h> 12 #include <watchdog.h> 13 #include <linux/list_sort.h> 14 15 DECLARE_GLOBAL_DATA_PTR; 16 17 efi_uintn_t efi_memory_map_key; 18 19 struct efi_mem_list { 20 struct list_head link; 21 struct efi_mem_desc desc; 22 }; 23 24 #define EFI_CARVE_NO_OVERLAP -1 25 #define EFI_CARVE_LOOP_AGAIN -2 26 #define EFI_CARVE_OVERLAPS_NONRAM -3 27 28 /* This list contains all memory map items */ 29 LIST_HEAD(efi_mem); 30 31 #ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER 32 void *efi_bounce_buffer; 33 #endif 34 35 /* 36 * U-Boot services each EFI AllocatePool request as a separate 37 * (multiple) page allocation. We have to track the number of pages 38 * to be able to free the correct amount later. 39 * EFI requires 8 byte alignment for pool allocations, so we can 40 * prepend each allocation with an 64 bit header tracking the 41 * allocation size, and hand out the remainder to the caller. 42 */ 43 struct efi_pool_allocation { 44 u64 num_pages; 45 char data[] __aligned(ARCH_DMA_MINALIGN); 46 }; 47 48 /* 49 * Sorts the memory list from highest address to lowest address 50 * 51 * When allocating memory we should always start from the highest 52 * address chunk, so sort the memory list such that the first list 53 * iterator gets the highest address and goes lower from there. 54 */ 55 static int efi_mem_cmp(void *priv, struct list_head *a, struct list_head *b) 56 { 57 struct efi_mem_list *mema = list_entry(a, struct efi_mem_list, link); 58 struct efi_mem_list *memb = list_entry(b, struct efi_mem_list, link); 59 60 if (mema->desc.physical_start == memb->desc.physical_start) 61 return 0; 62 else if (mema->desc.physical_start < memb->desc.physical_start) 63 return 1; 64 else 65 return -1; 66 } 67 68 static uint64_t desc_get_end(struct efi_mem_desc *desc) 69 { 70 return desc->physical_start + (desc->num_pages << EFI_PAGE_SHIFT); 71 } 72 73 static void efi_mem_sort(void) 74 { 75 struct list_head *lhandle; 76 struct efi_mem_list *prevmem = NULL; 77 bool merge_again = true; 78 79 list_sort(NULL, &efi_mem, efi_mem_cmp); 80 81 /* Now merge entries that can be merged */ 82 while (merge_again) { 83 merge_again = false; 84 list_for_each(lhandle, &efi_mem) { 85 struct efi_mem_list *lmem; 86 struct efi_mem_desc *prev = &prevmem->desc; 87 struct efi_mem_desc *cur; 88 uint64_t pages; 89 90 lmem = list_entry(lhandle, struct efi_mem_list, link); 91 if (!prevmem) { 92 prevmem = lmem; 93 continue; 94 } 95 96 cur = &lmem->desc; 97 98 if ((desc_get_end(cur) == prev->physical_start) && 99 (prev->type == cur->type) && 100 (prev->attribute == cur->attribute)) { 101 /* There is an existing map before, reuse it */ 102 pages = cur->num_pages; 103 prev->num_pages += pages; 104 prev->physical_start -= pages << EFI_PAGE_SHIFT; 105 prev->virtual_start -= pages << EFI_PAGE_SHIFT; 106 list_del(&lmem->link); 107 free(lmem); 108 109 merge_again = true; 110 break; 111 } 112 113 prevmem = lmem; 114 } 115 } 116 } 117 118 /** efi_mem_carve_out - unmap memory region 119 * 120 * @map: memory map 121 * @carve_desc: memory region to unmap 122 * @overlap_only_ram: the carved out region may only overlap RAM 123 * Return Value: the number of overlapping pages which have been 124 * removed from the map, 125 * EFI_CARVE_NO_OVERLAP, if the regions don't overlap, 126 * EFI_CARVE_OVERLAPS_NONRAM, if the carve and map overlap, 127 * and the map contains anything but free ram 128 * (only when overlap_only_ram is true), 129 * EFI_CARVE_LOOP_AGAIN, if the mapping list should be 130 * traversed again, as it has been altered. 131 * 132 * Unmaps all memory occupied by the carve_desc region from the list entry 133 * pointed to by map. 134 * 135 * In case of EFI_CARVE_OVERLAPS_NONRAM it is the callers responsibility 136 * to re-add the already carved out pages to the mapping. 137 */ 138 static s64 efi_mem_carve_out(struct efi_mem_list *map, 139 struct efi_mem_desc *carve_desc, 140 bool overlap_only_ram) 141 { 142 struct efi_mem_list *newmap; 143 struct efi_mem_desc *map_desc = &map->desc; 144 uint64_t map_start = map_desc->physical_start; 145 uint64_t map_end = map_start + (map_desc->num_pages << EFI_PAGE_SHIFT); 146 uint64_t carve_start = carve_desc->physical_start; 147 uint64_t carve_end = carve_start + 148 (carve_desc->num_pages << EFI_PAGE_SHIFT); 149 150 /* check whether we're overlapping */ 151 if ((carve_end <= map_start) || (carve_start >= map_end)) 152 return EFI_CARVE_NO_OVERLAP; 153 154 /* We're overlapping with non-RAM, warn the caller if desired */ 155 if (overlap_only_ram && (map_desc->type != EFI_CONVENTIONAL_MEMORY)) 156 return EFI_CARVE_OVERLAPS_NONRAM; 157 158 /* Sanitize carve_start and carve_end to lie within our bounds */ 159 carve_start = max(carve_start, map_start); 160 carve_end = min(carve_end, map_end); 161 162 /* Carving at the beginning of our map? Just move it! */ 163 if (carve_start == map_start) { 164 if (map_end == carve_end) { 165 /* Full overlap, just remove map */ 166 list_del(&map->link); 167 free(map); 168 } else { 169 map->desc.physical_start = carve_end; 170 map->desc.num_pages = (map_end - carve_end) 171 >> EFI_PAGE_SHIFT; 172 } 173 174 return (carve_end - carve_start) >> EFI_PAGE_SHIFT; 175 } 176 177 /* 178 * Overlapping maps, just split the list map at carve_start, 179 * it will get moved or removed in the next iteration. 180 * 181 * [ map_desc |__carve_start__| newmap ] 182 */ 183 184 /* Create a new map from [ carve_start ... map_end ] */ 185 newmap = calloc(1, sizeof(*newmap)); 186 newmap->desc = map->desc; 187 newmap->desc.physical_start = carve_start; 188 newmap->desc.num_pages = (map_end - carve_start) >> EFI_PAGE_SHIFT; 189 /* Insert before current entry (descending address order) */ 190 list_add_tail(&newmap->link, &map->link); 191 192 /* Shrink the map to [ map_start ... carve_start ] */ 193 map_desc->num_pages = (carve_start - map_start) >> EFI_PAGE_SHIFT; 194 195 return EFI_CARVE_LOOP_AGAIN; 196 } 197 198 uint64_t efi_add_memory_map(uint64_t start, uint64_t pages, int memory_type, 199 bool overlap_only_ram) 200 { 201 struct list_head *lhandle; 202 struct efi_mem_list *newlist; 203 bool carve_again; 204 uint64_t carved_pages = 0; 205 206 debug("%s: 0x%llx 0x%llx %d %s\n", __func__, 207 start, pages, memory_type, overlap_only_ram ? "yes" : "no"); 208 209 if (memory_type >= EFI_MAX_MEMORY_TYPE) 210 return EFI_INVALID_PARAMETER; 211 212 if (!pages) 213 return start; 214 215 ++efi_memory_map_key; 216 newlist = calloc(1, sizeof(*newlist)); 217 newlist->desc.type = memory_type; 218 newlist->desc.physical_start = start; 219 newlist->desc.virtual_start = start; 220 newlist->desc.num_pages = pages; 221 222 switch (memory_type) { 223 case EFI_RUNTIME_SERVICES_CODE: 224 case EFI_RUNTIME_SERVICES_DATA: 225 newlist->desc.attribute = EFI_MEMORY_WB | EFI_MEMORY_RUNTIME; 226 break; 227 case EFI_MMAP_IO: 228 newlist->desc.attribute = EFI_MEMORY_RUNTIME; 229 break; 230 default: 231 newlist->desc.attribute = EFI_MEMORY_WB; 232 break; 233 } 234 235 /* Add our new map */ 236 do { 237 carve_again = false; 238 list_for_each(lhandle, &efi_mem) { 239 struct efi_mem_list *lmem; 240 s64 r; 241 242 lmem = list_entry(lhandle, struct efi_mem_list, link); 243 r = efi_mem_carve_out(lmem, &newlist->desc, 244 overlap_only_ram); 245 switch (r) { 246 case EFI_CARVE_OVERLAPS_NONRAM: 247 /* 248 * The user requested to only have RAM overlaps, 249 * but we hit a non-RAM region. Error out. 250 */ 251 return 0; 252 case EFI_CARVE_NO_OVERLAP: 253 /* Just ignore this list entry */ 254 break; 255 case EFI_CARVE_LOOP_AGAIN: 256 /* 257 * We split an entry, but need to loop through 258 * the list again to actually carve it. 259 */ 260 carve_again = true; 261 break; 262 default: 263 /* We carved a number of pages */ 264 carved_pages += r; 265 carve_again = true; 266 break; 267 } 268 269 if (carve_again) { 270 /* The list changed, we need to start over */ 271 break; 272 } 273 } 274 } while (carve_again); 275 276 if (overlap_only_ram && (carved_pages != pages)) { 277 /* 278 * The payload wanted to have RAM overlaps, but we overlapped 279 * with an unallocated region. Error out. 280 */ 281 return 0; 282 } 283 284 /* Add our new map */ 285 list_add_tail(&newlist->link, &efi_mem); 286 287 /* And make sure memory is listed in descending order */ 288 efi_mem_sort(); 289 290 return start; 291 } 292 293 static uint64_t efi_find_free_memory(uint64_t len, uint64_t max_addr) 294 { 295 struct list_head *lhandle; 296 297 list_for_each(lhandle, &efi_mem) { 298 struct efi_mem_list *lmem = list_entry(lhandle, 299 struct efi_mem_list, link); 300 struct efi_mem_desc *desc = &lmem->desc; 301 uint64_t desc_len = desc->num_pages << EFI_PAGE_SHIFT; 302 uint64_t desc_end = desc->physical_start + desc_len; 303 uint64_t curmax = min(max_addr, desc_end); 304 uint64_t ret = curmax - len; 305 306 /* We only take memory from free RAM */ 307 if (desc->type != EFI_CONVENTIONAL_MEMORY) 308 continue; 309 310 /* Out of bounds for max_addr */ 311 if ((ret + len) > max_addr) 312 continue; 313 314 /* Out of bounds for upper map limit */ 315 if ((ret + len) > desc_end) 316 continue; 317 318 /* Out of bounds for lower map limit */ 319 if (ret < desc->physical_start) 320 continue; 321 322 /* Return the highest address in this map within bounds */ 323 return ret; 324 } 325 326 return 0; 327 } 328 329 /* 330 * Allocate memory pages. 331 * 332 * @type type of allocation to be performed 333 * @memory_type usage type of the allocated memory 334 * @pages number of pages to be allocated 335 * @memory allocated memory 336 * @return status code 337 */ 338 efi_status_t efi_allocate_pages(int type, int memory_type, 339 efi_uintn_t pages, uint64_t *memory) 340 { 341 u64 len = pages << EFI_PAGE_SHIFT; 342 efi_status_t r = EFI_SUCCESS; 343 uint64_t addr; 344 345 if (!memory) 346 return EFI_INVALID_PARAMETER; 347 348 switch (type) { 349 case EFI_ALLOCATE_ANY_PAGES: 350 /* Any page */ 351 addr = efi_find_free_memory(len, -1ULL); 352 if (!addr) { 353 r = EFI_NOT_FOUND; 354 break; 355 } 356 break; 357 case EFI_ALLOCATE_MAX_ADDRESS: 358 /* Max address */ 359 addr = efi_find_free_memory(len, *memory); 360 if (!addr) { 361 r = EFI_NOT_FOUND; 362 break; 363 } 364 break; 365 case EFI_ALLOCATE_ADDRESS: 366 /* Exact address, reserve it. The addr is already in *memory. */ 367 addr = *memory; 368 break; 369 default: 370 /* UEFI doesn't specify other allocation types */ 371 r = EFI_INVALID_PARAMETER; 372 break; 373 } 374 375 if (r == EFI_SUCCESS) { 376 uint64_t ret; 377 378 /* Reserve that map in our memory maps */ 379 ret = efi_add_memory_map(addr, pages, memory_type, true); 380 if (ret == addr) { 381 *memory = (uintptr_t)map_sysmem(addr, len); 382 } else { 383 /* Map would overlap, bail out */ 384 r = EFI_OUT_OF_RESOURCES; 385 } 386 } 387 388 return r; 389 } 390 391 void *efi_alloc(uint64_t len, int memory_type) 392 { 393 uint64_t ret = 0; 394 uint64_t pages = (len + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT; 395 efi_status_t r; 396 397 r = efi_allocate_pages(EFI_ALLOCATE_ANY_PAGES, memory_type, pages, 398 &ret); 399 if (r == EFI_SUCCESS) 400 return (void*)(uintptr_t)ret; 401 402 return NULL; 403 } 404 405 /* 406 * Free memory pages. 407 * 408 * @memory start of the memory area to be freed 409 * @pages number of pages to be freed 410 * @return status code 411 */ 412 efi_status_t efi_free_pages(uint64_t memory, efi_uintn_t pages) 413 { 414 uint64_t r = 0; 415 uint64_t addr = map_to_sysmem((void *)(uintptr_t)memory); 416 417 r = efi_add_memory_map(addr, pages, EFI_CONVENTIONAL_MEMORY, false); 418 /* Merging of adjacent free regions is missing */ 419 420 if (r == addr) 421 return EFI_SUCCESS; 422 423 return EFI_NOT_FOUND; 424 } 425 426 /* 427 * Allocate memory from pool. 428 * 429 * @pool_type type of the pool from which memory is to be allocated 430 * @size number of bytes to be allocated 431 * @buffer allocated memory 432 * @return status code 433 */ 434 efi_status_t efi_allocate_pool(int pool_type, efi_uintn_t size, void **buffer) 435 { 436 efi_status_t r; 437 struct efi_pool_allocation *alloc; 438 u64 num_pages = (size + sizeof(struct efi_pool_allocation) + 439 EFI_PAGE_MASK) >> EFI_PAGE_SHIFT; 440 441 if (!buffer) 442 return EFI_INVALID_PARAMETER; 443 444 if (size == 0) { 445 *buffer = NULL; 446 return EFI_SUCCESS; 447 } 448 449 r = efi_allocate_pages(EFI_ALLOCATE_ANY_PAGES, pool_type, num_pages, 450 (uint64_t *)&alloc); 451 452 if (r == EFI_SUCCESS) { 453 alloc->num_pages = num_pages; 454 *buffer = alloc->data; 455 } 456 457 return r; 458 } 459 460 /* 461 * Free memory from pool. 462 * 463 * @buffer start of memory to be freed 464 * @return status code 465 */ 466 efi_status_t efi_free_pool(void *buffer) 467 { 468 efi_status_t r; 469 struct efi_pool_allocation *alloc; 470 471 if (buffer == NULL) 472 return EFI_INVALID_PARAMETER; 473 474 alloc = container_of(buffer, struct efi_pool_allocation, data); 475 /* Sanity check, was the supplied address returned by allocate_pool */ 476 assert(((uintptr_t)alloc & EFI_PAGE_MASK) == 0); 477 478 r = efi_free_pages((uintptr_t)alloc, alloc->num_pages); 479 480 return r; 481 } 482 483 /* 484 * Get map describing memory usage. 485 * 486 * @memory_map_size on entry the size, in bytes, of the memory map buffer, 487 * on exit the size of the copied memory map 488 * @memory_map buffer to which the memory map is written 489 * @map_key key for the memory map 490 * @descriptor_size size of an individual memory descriptor 491 * @descriptor_version version number of the memory descriptor structure 492 * @return status code 493 */ 494 efi_status_t efi_get_memory_map(efi_uintn_t *memory_map_size, 495 struct efi_mem_desc *memory_map, 496 efi_uintn_t *map_key, 497 efi_uintn_t *descriptor_size, 498 uint32_t *descriptor_version) 499 { 500 efi_uintn_t map_size = 0; 501 int map_entries = 0; 502 struct list_head *lhandle; 503 efi_uintn_t provided_map_size; 504 505 if (!memory_map_size) 506 return EFI_INVALID_PARAMETER; 507 508 provided_map_size = *memory_map_size; 509 510 list_for_each(lhandle, &efi_mem) 511 map_entries++; 512 513 map_size = map_entries * sizeof(struct efi_mem_desc); 514 515 *memory_map_size = map_size; 516 517 if (provided_map_size < map_size) 518 return EFI_BUFFER_TOO_SMALL; 519 520 if (!memory_map) 521 return EFI_INVALID_PARAMETER; 522 523 if (descriptor_size) 524 *descriptor_size = sizeof(struct efi_mem_desc); 525 526 if (descriptor_version) 527 *descriptor_version = EFI_MEMORY_DESCRIPTOR_VERSION; 528 529 /* Copy list into array */ 530 /* Return the list in ascending order */ 531 memory_map = &memory_map[map_entries - 1]; 532 list_for_each(lhandle, &efi_mem) { 533 struct efi_mem_list *lmem; 534 535 lmem = list_entry(lhandle, struct efi_mem_list, link); 536 *memory_map = lmem->desc; 537 memory_map--; 538 } 539 540 if (map_key) 541 *map_key = efi_memory_map_key; 542 543 return EFI_SUCCESS; 544 } 545 546 __weak void efi_add_known_memory(void) 547 { 548 int i; 549 550 /* Add RAM */ 551 for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { 552 u64 ram_start = gd->bd->bi_dram[i].start; 553 u64 ram_size = gd->bd->bi_dram[i].size; 554 u64 start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK; 555 u64 pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT; 556 557 efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY, 558 false); 559 } 560 } 561 562 /* Add memory regions for U-Boot's memory and for the runtime services code */ 563 static void add_u_boot_and_runtime(void) 564 { 565 unsigned long runtime_start, runtime_end, runtime_pages; 566 unsigned long uboot_start, uboot_pages; 567 unsigned long uboot_stack_size = 16 * 1024 * 1024; 568 569 /* Add U-Boot */ 570 uboot_start = (gd->start_addr_sp - uboot_stack_size) & ~EFI_PAGE_MASK; 571 uboot_pages = (gd->ram_top - uboot_start) >> EFI_PAGE_SHIFT; 572 efi_add_memory_map(uboot_start, uboot_pages, EFI_LOADER_DATA, false); 573 574 /* Add Runtime Services */ 575 runtime_start = (ulong)&__efi_runtime_start & ~EFI_PAGE_MASK; 576 runtime_end = (ulong)&__efi_runtime_stop; 577 runtime_end = (runtime_end + EFI_PAGE_MASK) & ~EFI_PAGE_MASK; 578 runtime_pages = (runtime_end - runtime_start) >> EFI_PAGE_SHIFT; 579 efi_add_memory_map(runtime_start, runtime_pages, 580 EFI_RUNTIME_SERVICES_CODE, false); 581 } 582 583 int efi_memory_init(void) 584 { 585 efi_add_known_memory(); 586 587 if (!IS_ENABLED(CONFIG_SANDBOX)) 588 add_u_boot_and_runtime(); 589 590 #ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER 591 /* Request a 32bit 64MB bounce buffer region */ 592 uint64_t efi_bounce_buffer_addr = 0xffffffff; 593 594 if (efi_allocate_pages(EFI_ALLOCATE_MAX_ADDRESS, EFI_LOADER_DATA, 595 (64 * 1024 * 1024) >> EFI_PAGE_SHIFT, 596 &efi_bounce_buffer_addr) != EFI_SUCCESS) 597 return -1; 598 599 efi_bounce_buffer = (void*)(uintptr_t)efi_bounce_buffer_addr; 600 #endif 601 602 return 0; 603 } 604