// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	.unaccepted		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

extern unsigned long screen_info_table;

struct mm_struct efi_mm = {
	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq	= SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);
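
/* the top-level EFI directory in sysfs (/sys/firmware/efi) */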
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static bool generic_ops_supported(void)
{
	unsigned long name_size;
	efi_status_t status;
	efi_char16_t name;
	efi_guid_t guid;

	name_size = sizeof(name);

	status = efi.get_next_variable(&name_size, &name, &guid);
	if (status == EFI_UNSUPPORTED)
		return false;

	return true;
}

static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;
	generic_ops.query_variable_info = efi.query_variable_info;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}

static void generic_ops_unregister(void)
{
	if (!generic_ops.get_variable)
		return;

	efivars_unregister(&generic_efivars);
}

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
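/*
 * Handle the "efivar_ssdt=" command line option: record the name of the EFI
 * variable that efivar_ssdt_load() should load as an ACPI SSDT, unless the
 * kernel is locked down.
 */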
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			name = krealloc(name, name_size, GFP_KERNEL);
			if (!name)
				return -ENOMEM;
			continue;
		}

		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size)
			return -EIO;

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status ret = acpi_load_table(data, NULL);
			if (ret)
				pr_err("failed to load table: %u\n", ret);
			else
				continue;
		} else {
			pr_err("failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}
	return 0;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
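 *
 * The resulting layout is roughly the following (illustrative; which
 * attributes show up depends on the architecture and on
 * efi_attr_is_visible()):
 *
 *   /sys/firmware/efi/{systab,fw_platform_size,fw_vendor,runtime,
 *                      config_table,efivars/}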
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		efivar_ssdt_load();
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);

void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
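 *
 * Typical use (illustrative):
 *
 *	efi_memory_desc_t md;
 *
 *	if (!efi_mem_desc_lookup(phys_addr, &md))
 *		size = md.num_pages << EFI_PAGE_SHIFT;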
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		/* skip bogus entries (including empty ones) */
		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
		    (md->num_pages <= 0) ||
		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	__weak __alias(__efi_mem_desc_lookup);

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @addr, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
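
/*
 * Configuration tables matched on every architecture: when a table with one
 * of these GUIDs is found, match_config_table() records its address in the
 * associated pointer (and prints the name, if any).
 */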
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table			},
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (efi_guidcmp(*guid, table_types[i].guid))
			continue;

		if (!efi_config_table_is_usable(guid, table)) {
			if (table_types[i].name[0])
				pr_cont("(%s=0x%lx unusable) ",
					table_types[i].name, table);
			return 1;
		}

		*(table_types[i].ptr) = table;
		if (table_types[i].name[0])
			pr_cont("%s=0x%lx ", table_types[i].name, table);
		return 1;
	}

	return 0;
}

/**
 * reserve_unaccepted - Map and reserve unaccepted configuration table
 * @unaccepted: Pointer to unaccepted memory table
 *
 * memblock_add() makes sure that the table is mapped in direct mapping. During
 * normal boot it happens automatically because the table is allocated from
 * usable memory. But during crashkernel boot only memory specifically reserved
 * for crash scenario is mapped. memblock_add() forces the table to be mapped
 * in crashkernel case.
 *
 * Align the range to the nearest page borders. Ranges smaller than page size
 * are not going to be mapped.
 *
 * memblock_reserve() makes sure that future allocations will not touch the
 * table.
 */
static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
{
	phys_addr_t start, size;

	start = PAGE_ALIGN_DOWN(efi.unaccepted);
	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);

	memblock_add(start, size);
	memblock_reserve(start, size);
}

int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
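			 *
			 * (A linux_efi_memreserve entry never crosses a page
			 * boundary; see the comment in
			 * efi_mem_reserve_persistent() below.)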
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
		struct efi_unaccepted_memory *unaccepted;

		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
		if (unaccepted) {

			if (unaccepted->version == 1) {
				reserve_unaccepted(unaccepted);
			} else {
				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
			}

			early_memunmap(unaccepted, sizeof(*unaccepted));
		}
	}

	return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}

static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
	"Unaccepted",
};

char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
			 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
			 attr & EFI_MEMORY_SP ? "SP" : "",
			 attr & EFI_MEMORY_NV ? "NV" : "",
			 attr & EFI_MEMORY_XP ? "XP" : "",
			 attr & EFI_MEMORY_RP ? "RP" : "",
			 attr & EFI_MEMORY_WP ? "WP" : "",
			 attr & EFI_MEMORY_RO ? "RO" : "",
			 attr & EFI_MEMORY_UCE ? "UCE" : "",
			 attr & EFI_MEMORY_WB ? "WB" : "",
			 attr & EFI_MEMORY_WT ? "WT" : "",
			 attr & EFI_MEMORY_WC ? "WC" : "",
			 attr & EFI_MEMORY_UC ? "UC" : "");
	return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL if it was not, and -ENOTSUPP if no EFI memory map is
 * available.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
#endif

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}
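
/*
 * efi_mem_reserve_persistent - record a reservation in the EFI memreserve table
 * @addr:	base of the region to reserve
 * @size:	size of the region to reserve
 *
 * Append @addr/@size to the LINUX_EFI_MEMRESERVE linked list, allocating a
 * new list entry if all existing slots are in use, so that the region is
 * also reserved by a later kernel that parses the MEMRESERVE configuration
 * table (see efi_config_parse_tables()), e.g. across kexec.
 */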
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif