/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

struct efi_memory_map memmap;

static efi_runtime_services_t *runtime;

static u64 efi_system_table;

static int uefi_debug __initdata;
static int __init uefi_debug_setup(char *str)
{
	uefi_debug = 1;

	return 0;
}
early_param("uefi_debug", uefi_debug_setup);

static int __init is_normal_ram(efi_memory_desc_t *md)
{
	if (md->attribute & EFI_MEMORY_WB)
		return 1;
	return 0;
}

static void __init efi_setup_idmap(void)
{
	struct memblock_region *r;
	efi_memory_desc_t *md;
	u64 paddr, npages, size;

	for_each_memblock(memory, r)
		create_id_mapping(r->base, r->size, 0);

	/* map runtime io spaces */
	for_each_efi_memory_desc(&memmap, md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
			continue;
		paddr = md->phys_addr;
		npages = md->num_pages;
		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;
		create_id_mapping(paddr, size, 1);
	}
}

static int __init uefi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i, retval;

	efi.systab = early_memremap(efi_system_table,
				    sizeof(efi_system_table_t));
	if (efi.systab == NULL) {
		pr_warn("Unable to map EFI system table.\n");
		return -ENOMEM;
	}

	set_bit(EFI_BOOT, &efi.flags);
	set_bit(EFI_64BIT, &efi.flags);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect\n");
		return -EINVAL;
	}
	if ((efi.systab->hdr.revision >> 16) < 2)
		pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
			efi.systab->hdr.revision >> 16,
			efi.systab->hdr.revision & 0xffff);

	/* Show what we know for posterity */
	c16 = early_memremap(efi.systab->fw_vendor,
			     sizeof(vendor));
	if (c16) {
		/* copy up to the terminating NUL, not just while c16[0] != 0 */
		for (i = 0; i < (int) sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';
	}

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	retval = efi_config_init(NULL);
	if (retval == 0)
		set_bit(EFI_CONFIG_TABLES, &efi.flags);

	early_memunmap(c16, sizeof(vendor));
	early_memunmap(efi.systab, sizeof(efi_system_table_t));

	return retval;
}

static __initdata char memory_type_name[][32] = {
	{"Reserved"},
	{"Loader Code"},
	{"Loader Data"},
	{"Boot Code"},
	{"Boot Data"},
	{"Runtime Code"},
	{"Runtime Data"},
	{"Conventional Memory"},
	{"Unusable Memory"},
	{"ACPI Reclaim Memory"},
	{"ACPI Memory NVS"},
	{"Memory Mapped I/O"},
	{"MMIO Port Space"},
	{"PAL Code"},
};

/*
 * Return true for RAM regions we want to permanently reserve.
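 *
 * Runtime services regions must remain intact for the lifetime of the
 * system, ACPI reclaim memory holds the ACPI tables, and reserved
 * regions belong to the firmware, so none of these may be released to
 * the kernel's memory allocators.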
 */
static __init int is_reserve_region(efi_memory_desc_t *md)
{
	if (!is_normal_ram(md))
		return 0;

	if (md->attribute & EFI_MEMORY_RUNTIME)
		return 1;

	if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
	    md->type == EFI_RESERVED_TYPE)
		return 1;

	return 0;
}

static __init void reserve_regions(void)
{
	efi_memory_desc_t *md;
	u64 paddr, npages, size;

	if (uefi_debug)
		pr_info("Processing EFI memory map:\n");

	for_each_efi_memory_desc(&memmap, md) {
		paddr = md->phys_addr;
		npages = md->num_pages;

		if (uefi_debug)
			pr_info(" 0x%012llx-0x%012llx [%s]",
				paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
				memory_type_name[md->type]);

		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;

		if (is_normal_ram(md))
			early_init_dt_add_memory_arch(paddr, size);

		if (is_reserve_region(md) ||
		    md->type == EFI_BOOT_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_DATA) {
			memblock_reserve(paddr, size);
			if (uefi_debug)
				pr_cont("*");
		}

		if (uefi_debug)
			pr_cont("\n");
	}

	set_bit(EFI_MEMMAP, &efi.flags);
}

static u64 __init free_one_region(u64 start, u64 end)
{
	u64 size = end - start;

	if (uefi_debug)
		pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);

	free_bootmem_late(start, size);
	return size;
}

static u64 __init free_region(u64 start, u64 end)
{
	u64 map_start, map_end, total = 0;

	if (end <= start)
		return total;

	map_start = (u64)memmap.phys_map;
	map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
	map_start &= PAGE_MASK;

	if (start < map_end && end > map_start) {
		/* region overlaps UEFI memmap */
		if (start < map_start)
			total += free_one_region(start, map_start);

		if (map_end < end)
			total += free_one_region(map_end, end);
	} else
		total += free_one_region(start, end);

	return total;
}

static void __init free_boot_services(void)
{
	u64 total_freed = 0;
	u64 keep_end, free_start, free_end;
	efi_memory_desc_t *md;

	/*
	 * If the kernel uses larger pages than UEFI, we have to be careful
	 * not to inadvertently free memory we want to keep if there is
	 * overlap at the kernel page size alignment. We do not want to
	 * free is_reserve_region() memory nor the UEFI memmap itself.
	 *
	 * The memory map is sorted, so we keep track of the end of
	 * any previous region we want to keep, remember any region
	 * we want to free and defer freeing it until we encounter
	 * the next region we want to keep. This way, before freeing
	 * it, we can clip it as needed to avoid freeing memory we
	 * want to keep for UEFI.
	 */

	keep_end = 0;
	free_start = 0;

	for_each_efi_memory_desc(&memmap, md) {
		u64 paddr, npages, size;

		if (is_reserve_region(md)) {
			/*
			 * We don't want to free any memory from this region.
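			 *
			 * If the pending free range was rounded up to a
			 * kernel page boundary that overlaps the start of
			 * this region, clip one page off its end before
			 * handing it to free_region() below.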
			 */
			if (free_start) {
				/* adjust free_end then free region */
				if (free_end > md->phys_addr)
					free_end -= PAGE_SIZE;
				total_freed += free_region(free_start, free_end);
				free_start = 0;
			}
			keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
			continue;
		}

		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA) {
			/* no need to free this region */
			continue;
		}

		/*
		 * We want to free memory from this region.
		 */
		paddr = md->phys_addr;
		npages = md->num_pages;
		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;

		if (free_start) {
			if (paddr <= free_end)
				free_end = paddr + size;
			else {
				total_freed += free_region(free_start, free_end);
				free_start = paddr;
				free_end = paddr + size;
			}
		} else {
			free_start = paddr;
			free_end = paddr + size;
		}
		if (free_start < keep_end) {
			free_start += PAGE_SIZE;
			if (free_start >= free_end)
				free_start = 0;
		}
	}
	if (free_start)
		total_freed += free_region(free_start, free_end);

	if (total_freed)
		pr_info("Freed 0x%llx bytes of EFI boot services memory\n",
			total_freed);
}

void __init efi_init(void)
{
	struct efi_fdt_params params;

	/* Grab UEFI information placed in FDT by stub */
	if (!efi_get_fdt_params(&params, uefi_debug))
		return;

	efi_system_table = params.system_table;

	memblock_reserve(params.mmap & PAGE_MASK,
			 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
	memmap.phys_map = (void *)params.mmap;
	memmap.map = early_memremap(params.mmap, params.mmap_size);
	memmap.map_end = memmap.map + params.mmap_size;
	memmap.desc_size = params.desc_size;
	memmap.desc_version = params.desc_ver;

	if (uefi_init() < 0)
		return;

	reserve_regions();
}

void __init efi_idmap_init(void)
{
	if (!efi_enabled(EFI_BOOT))
		return;

	/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
	efi_setup_idmap();
}

static int __init remap_region(efi_memory_desc_t *md, void **new)
{
	u64 paddr, vaddr, npages, size;

	paddr = md->phys_addr;
	npages = md->num_pages;
	memrange_efi_to_native(&paddr, &npages);
	size = npages << PAGE_SHIFT;

	if (is_normal_ram(md))
		vaddr = (__force u64)ioremap_cache(paddr, size);
	else
		vaddr = (__force u64)ioremap(paddr, size);

	if (!vaddr) {
		pr_err("Unable to remap 0x%llx pages @ %p\n",
		       npages, (void *)paddr);
		return 0;
	}

	/* adjust for any rounding when the EFI and system page sizes differ */
	md->virt_addr = vaddr + (md->phys_addr - paddr);

	if (uefi_debug)
		pr_info(" EFI remap 0x%012llx => %p\n",
			md->phys_addr, (void *)md->virt_addr);

	memcpy(*new, md, memmap.desc_size);
	*new += memmap.desc_size;

	return 1;
}

/*
 * Switch UEFI from an identity map to a kernel virtual map
 */
static int __init arm64_enter_virtual_mode(void)
{
	efi_memory_desc_t *md;
	phys_addr_t virtmap_phys;
	void *virtmap, *virt_md;
	efi_status_t status;
	u64 mapsize;
	int count = 0;
	unsigned long flags;

	if (!efi_enabled(EFI_BOOT)) {
		pr_info("EFI services will not be available.\n");
		return -1;
	}

	pr_info("Remapping and enabling EFI services.\n");

	/* replace early memmap mapping with permanent mapping */
	mapsize = memmap.map_end - memmap.map;
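	/*
	 * The early_memremap() mapping set up in efi_init() is only usable
	 * during early boot; drop it here and switch to a permanent,
	 * cacheable mapping of the same physical region.
	 */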
	early_memunmap(memmap.map, mapsize);
	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
						   mapsize);
	memmap.map_end = memmap.map + mapsize;

	efi.memmap = &memmap;

	/* Map the runtime regions */
	virtmap = kmalloc(mapsize, GFP_KERNEL);
	if (!virtmap) {
		pr_err("Failed to allocate EFI virtual memmap\n");
		return -1;
	}
	virtmap_phys = virt_to_phys(virtmap);
	virt_md = virtmap;

	for_each_efi_memory_desc(&memmap, md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (!remap_region(md, &virt_md))
			goto err_unmap;
		++count;
	}

	efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
	if (!efi.systab) {
		/*
		 * If we have no virtual mapping for the System Table at this
		 * point, the memory map doesn't cover the physical offset where
		 * it resides. This means the System Table will be inaccessible
		 * to Runtime Services themselves once the virtual mapping is
		 * installed.
		 */
		pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
		goto err_unmap;
	}
	set_bit(EFI_SYSTEM_TABLES, &efi.flags);

	local_irq_save(flags);
	cpu_switch_mm(idmap_pg_dir, &init_mm);

	/* Call SetVirtualAddressMap with the physical address of the map */
	runtime = efi.systab->runtime;
	efi.set_virtual_address_map = runtime->set_virtual_address_map;

	status = efi.set_virtual_address_map(count * memmap.desc_size,
					     memmap.desc_size,
					     memmap.desc_version,
					     (efi_memory_desc_t *)virtmap_phys);
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	local_irq_restore(flags);

	kfree(virtmap);

	free_boot_services();

	if (status != EFI_SUCCESS) {
		pr_err("Failed to set EFI virtual address map! [%lx]\n",
		       status);
		return -1;
	}

	/* Set up runtime services function pointers */
	runtime = efi.systab->runtime;
	efi_native_runtime_setup();
	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	efi.runtime_version = efi.systab->hdr.revision;

	return 0;

err_unmap:
	/* unmap all mappings that succeeded: there are 'count' of those */
	for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
		md = virt_md;
		iounmap((__force void __iomem *)md->virt_addr);
	}
	kfree(virtmap);
	return -1;
}
early_initcall(arm64_enter_virtual_mode);