/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

struct efi_memory_map memmap;

static efi_runtime_services_t *runtime;

static u64 efi_system_table;

static int uefi_debug __initdata;
static int __init uefi_debug_setup(char *str)
{
	uefi_debug = 1;

	return 0;
}
early_param("uefi_debug", uefi_debug_setup);

static int __init is_normal_ram(efi_memory_desc_t *md)
{
	if (md->attribute & EFI_MEMORY_WB)
		return 1;
	return 0;
}
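
/*
 * A note on page sizes: the UEFI memory map always describes regions in
 * 4 KiB EFI pages (EFI_PAGE_SHIFT == 12), while the kernel page size on
 * arm64 may be larger (e.g. 64 KiB). memrange_efi_to_native(), used
 * throughout this file, is assumed to round a region outwards to kernel
 * page granularity: the start is rounded down to a kernel page boundary
 * and the page count is recomputed so the whole EFI range is still
 * covered. For example, with 64 KiB kernel pages an EFI range of three
 * 4 KiB pages starting at 0x80012000 (ending at 0x80015000) becomes one
 * 64 KiB kernel page starting at 0x80010000.
 */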

static void __init efi_setup_idmap(void)
{
	struct memblock_region *r;
	efi_memory_desc_t *md;
	u64 paddr, npages, size;

	for_each_memblock(memory, r)
		create_id_mapping(r->base, r->size, 0);

	/* map runtime io spaces */
	for_each_efi_memory_desc(&memmap, md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
			continue;
		paddr = md->phys_addr;
		npages = md->num_pages;
		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;
		create_id_mapping(paddr, size, 1);
	}
}

static int __init uefi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i, retval;

	efi.systab = early_memremap(efi_system_table,
				    sizeof(efi_system_table_t));
	if (efi.systab == NULL) {
		pr_warn("Unable to map EFI system table.\n");
		return -ENOMEM;
	}

	set_bit(EFI_BOOT, &efi.flags);
	set_bit(EFI_64BIT, &efi.flags);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect\n");
		return -EINVAL;
	}
	if ((efi.systab->hdr.revision >> 16) < 2)
		pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
			efi.systab->hdr.revision >> 16,
			efi.systab->hdr.revision & 0xffff);

	/* Show what we know for posterity */
	c16 = early_memremap(efi.systab->fw_vendor,
			     sizeof(vendor));
	if (c16) {
		for (i = 0; i < (int) sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';
	}

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	retval = efi_config_init(NULL);
	if (retval == 0)
		set_bit(EFI_CONFIG_TABLES, &efi.flags);

	early_memunmap(c16, sizeof(vendor));
	early_memunmap(efi.systab, sizeof(efi_system_table_t));

	return retval;
}

static __initdata char memory_type_name[][32] = {
	{"Reserved"},
	{"Loader Code"},
	{"Loader Data"},
	{"Boot Code"},
	{"Boot Data"},
	{"Runtime Code"},
	{"Runtime Data"},
	{"Conventional Memory"},
	{"Unusable Memory"},
	{"ACPI Reclaim Memory"},
	{"ACPI Memory NVS"},
	{"Memory Mapped I/O"},
	{"MMIO Port Space"},
	{"PAL Code"},
};

/*
 * Return true for RAM regions we want to permanently reserve.
 */
static __init int is_reserve_region(efi_memory_desc_t *md)
{
	if (!is_normal_ram(md))
		return 0;

	if (md->attribute & EFI_MEMORY_RUNTIME)
		return 1;

	if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
	    md->type == EFI_RESERVED_TYPE)
		return 1;

	return 0;
}

static __init void reserve_regions(void)
{
	efi_memory_desc_t *md;
	u64 paddr, npages, size;

	if (uefi_debug)
		pr_info("Processing EFI memory map:\n");

	for_each_efi_memory_desc(&memmap, md) {
		paddr = md->phys_addr;
		npages = md->num_pages;

		if (uefi_debug)
			pr_info(" 0x%012llx-0x%012llx [%s]",
				paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
				memory_type_name[md->type]);

		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;

		if (is_normal_ram(md))
			early_init_dt_add_memory_arch(paddr, size);

		if (is_reserve_region(md) ||
		    md->type == EFI_BOOT_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_DATA) {
			memblock_reserve(paddr, size);
			if (uefi_debug)
				pr_cont("*");
		}

		if (uefi_debug)
			pr_cont("\n");
	}
}


static u64 __init free_one_region(u64 start, u64 end)
{
	u64 size = end - start;

	if (uefi_debug)
		pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);

	free_bootmem_late(start, size);
	return size;
}

static u64 __init free_region(u64 start, u64 end)
{
	u64 map_start, map_end, total = 0;

	if (end <= start)
		return total;

	map_start = (u64)memmap.phys_map;
	map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
	map_start &= PAGE_MASK;

	if (start < map_end && end > map_start) {
		/* region overlaps UEFI memmap */
		if (start < map_start)
			total += free_one_region(start, map_start);

		if (map_end < end)
			total += free_one_region(map_end, end);
	} else
		total += free_one_region(start, end);

	return total;
}

static void __init free_boot_services(void)
{
	u64 total_freed = 0;
	u64 keep_end, free_start, free_end;
	efi_memory_desc_t *md;

	/*
	 * If the kernel uses larger pages than UEFI, we have to be careful
	 * not to inadvertently free memory we want to keep if there is
	 * overlap at the kernel page size alignment. We do not want to
	 * free is_reserve_region() memory nor the UEFI memmap itself.
	 *
	 * The memory map is sorted, so we keep track of the end of
	 * any previous region we want to keep, remember any region
	 * we want to free and defer freeing it until we encounter
	 * the next region we want to keep. This way, before freeing
	 * it, we can clip it as needed to avoid freeing memory we
	 * want to keep for UEFI.
	 */
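	/*
	 * Worked example of the clipping above (illustrative only), assuming
	 * 64 KiB kernel pages and 4 KiB EFI pages: say a RuntimeServicesData
	 * region ends at 0x40012000 and a BootServicesData region starts
	 * right there. Rounding the boot services region to kernel pages
	 * moves its start down to 0x40010000, i.e. onto the kernel page that
	 * also holds the tail of the runtime region. The free_start < keep_end
	 * check below bumps free_start to 0x40020000 so that shared page
	 * stays reserved. Symmetrically, when the region following a pending
	 * free range is one we keep and it starts inside the range's last
	 * kernel page (free_end > md->phys_addr), that last page is clipped
	 * off before the range is freed.
	 */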

	keep_end = 0;
	free_start = 0;

	for_each_efi_memory_desc(&memmap, md) {
		u64 paddr, npages, size;

		if (is_reserve_region(md)) {
			/*
			 * We don't want to free any memory from this region.
			 */
			if (free_start) {
				/* adjust free_end then free region */
				if (free_end > md->phys_addr)
					free_end -= PAGE_SIZE;
				total_freed += free_region(free_start, free_end);
				free_start = 0;
			}
			keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
			continue;
		}

		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA) {
			/* no need to free this region */
			continue;
		}

		/*
		 * We want to free memory from this region.
		 */
		paddr = md->phys_addr;
		npages = md->num_pages;
		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;

		if (free_start) {
			if (paddr <= free_end)
				free_end = paddr + size;
			else {
				total_freed += free_region(free_start, free_end);
				free_start = paddr;
				free_end = paddr + size;
			}
		} else {
			free_start = paddr;
			free_end = paddr + size;
		}
		if (free_start < keep_end) {
			free_start += PAGE_SIZE;
			if (free_start >= free_end)
				free_start = 0;
		}
	}
	if (free_start)
		total_freed += free_region(free_start, free_end);

	if (total_freed)
		pr_info("Freed 0x%llx bytes of EFI boot services memory\n",
			total_freed);
}

void __init efi_init(void)
{
	struct efi_fdt_params params;

	/* Grab UEFI information placed in FDT by stub */
	if (!efi_get_fdt_params(&params, uefi_debug))
		return;

	efi_system_table = params.system_table;

	memblock_reserve(params.mmap & PAGE_MASK,
			 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
	memmap.phys_map = (void *)params.mmap;
	memmap.map = early_memremap(params.mmap, params.mmap_size);
	memmap.map_end = memmap.map + params.mmap_size;
	memmap.desc_size = params.desc_size;
	memmap.desc_version = params.desc_ver;

	if (uefi_init() < 0)
		return;

	reserve_regions();
}

void __init efi_idmap_init(void)
{
	if (!efi_enabled(EFI_BOOT))
		return;

	/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
	efi_setup_idmap();
}

static int __init remap_region(efi_memory_desc_t *md, void **new)
{
	u64 paddr, vaddr, npages, size;

	paddr = md->phys_addr;
	npages = md->num_pages;
	memrange_efi_to_native(&paddr, &npages);
	size = npages << PAGE_SHIFT;

	if (is_normal_ram(md))
		vaddr = (__force u64)ioremap_cache(paddr, size);
	else
		vaddr = (__force u64)ioremap(paddr, size);

	if (!vaddr) {
		pr_err("Unable to remap 0x%llx pages @ %p\n",
		       npages, (void *)paddr);
		return 0;
	}

	/* adjust for any rounding when EFI and system page sizes differ */
	md->virt_addr = vaddr + (md->phys_addr - paddr);

	if (uefi_debug)
		pr_info(" EFI remap 0x%012llx => %p\n",
			md->phys_addr, (void *)md->virt_addr);

	memcpy(*new, md, memmap.desc_size);
	*new += memmap.desc_size;

	return 1;
}

/*
 * Switch UEFI from an identity map to a kernel virtual map
 */
static int __init arm64_enter_virtual_mode(void)
{
	efi_memory_desc_t *md;
	phys_addr_t virtmap_phys;
	void *virtmap, *virt_md;
	efi_status_t status;
	u64 mapsize;
	int count = 0;
	unsigned long flags;

	if (!efi_enabled(EFI_BOOT)) {
		pr_info("EFI services will not be available.\n");
		return -1;
	}

	pr_info("Remapping and enabling EFI services.\n");

	/* replace early memmap mapping with permanent mapping */
	mapsize = memmap.map_end - memmap.map;
	early_memunmap(memmap.map, mapsize);
	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
						   mapsize);
	memmap.map_end = memmap.map + mapsize;

	efi.memmap = &memmap;

	/* Map the runtime regions */
	virtmap = kmalloc(mapsize, GFP_KERNEL);
	if (!virtmap) {
		pr_err("Failed to allocate EFI virtual memmap\n");
		return -1;
	}
	virtmap_phys = virt_to_phys(virtmap);
	virt_md = virtmap;

	for_each_efi_memory_desc(&memmap, md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (!remap_region(md, &virt_md))
			goto err_unmap;
		++count;
	}

	efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
	if (!efi.systab) {
		/*
		 * If we have no virtual mapping for the System Table at this
		 * point, the memory map doesn't cover the physical offset where
		 * it resides. This means the System Table will be inaccessible
		 * to Runtime Services themselves once the virtual mapping is
		 * installed.
		 */
		pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
		goto err_unmap;
	}
	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
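
	/*
	 * The firmware installs the new virtual map itself when
	 * SetVirtualAddressMap() is called, and until that call returns it
	 * still runs from its boot-time 1:1 mapping. Point TTBR0 at
	 * idmap_pg_dir, which efi_setup_idmap() extended earlier, for the
	 * duration of the call (the kernel itself runs out of TTBR1), and
	 * pass the descriptor array by its physical address, since that is
	 * how the firmware will see it. Afterwards, TTBR0 is parked on the
	 * empty reserved tables and the TLBs are flushed.
	 */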
	local_irq_save(flags);
	cpu_switch_mm(idmap_pg_dir, &init_mm);

	/* Call SetVirtualAddressMap with the physical address of the map */
	runtime = efi.systab->runtime;
	efi.set_virtual_address_map = runtime->set_virtual_address_map;

	status = efi.set_virtual_address_map(count * memmap.desc_size,
					     memmap.desc_size,
					     memmap.desc_version,
					     (efi_memory_desc_t *)virtmap_phys);
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	local_irq_restore(flags);

	kfree(virtmap);

	free_boot_services();

	if (status != EFI_SUCCESS) {
		pr_err("Failed to set EFI virtual address map! [%lx]\n",
		       status);
		return -1;
	}

	/* Set up runtime services function pointers */
	runtime = efi.systab->runtime;
	efi_native_runtime_setup();
	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	return 0;

err_unmap:
	/* unmap all mappings that succeeded: there are 'count' of those */
	for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
		md = virt_md;
		iounmap((__force void __iomem *)md->virt_addr);
	}
	kfree(virtmap);
	return -1;
}
early_initcall(arm64_enter_virtual_mode);
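
/*
 * Once this initcall has run successfully, EFI_RUNTIME_SERVICES is set and
 * generic code can reach the runtime services through the 'efi' ops table.
 * A minimal usage sketch (illustrative only; real callers such as the EFI
 * RTC driver live elsewhere):
 *
 *	if (efi_enabled(EFI_RUNTIME_SERVICES)) {
 *		efi_time_t tm;
 *
 *		if (efi.get_time(&tm, NULL) == EFI_SUCCESS)
 *			pr_info("UEFI RTC: %04u-%02u-%02u\n",
 *				tm.year, tm.month, tm.day);
 *	}
 */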