// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

extern unsigned long screen_info_table;

struct mm_struct efi_mm = {
	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq	= SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);
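
/*
 * Illustrative note (not from the original source): the options handled by
 * setup_noefi() and parse_efi_cmdline() above come from the kernel command
 * line, e.g. booting with
 *
 *	efi=debug,nosoftreserve
 *
 * sets EFI_DBG and EFI_MEM_NO_SOFT_RESERVE, while "noefi" (or "efi=noruntime")
 * only disables EFI runtime services; it does not hide EFI support entirely.
 */
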
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note, do not add more fields to the systab sysfs file as it breaks the
 * sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static int generic_ops_register(void)
{
	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

static void generic_ops_unregister(void)
{
	efivars_unregister(&generic_efivars);
}

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
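
/*
 * Usage sketch (assumed typical usage, not part of the original file): with
 * CONFIG_EFI_CUSTOM_SSDT_OVERLAYS enabled, booting with
 *
 *	efivar_ssdt=SSDTOVERLAY
 *
 * makes efivar_ssdt_load() below search all EFI variables for one whose name
 * matches "SSDTOVERLAY" (any vendor GUID) and hand its contents to
 * acpi_load_table(). The variable name "SSDTOVERLAY" is just an example.
 */
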
static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;
	int err = 0;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			efi_char16_t *name_tmp;

			/* grow the name buffer without leaking the old one */
			name_tmp = krealloc(name, name_size, GFP_KERNEL);
			if (!name_tmp) {
				err = -ENOMEM;
				break;
			}
			name = name_tmp;
			continue;
		}

		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
			err = -EIO;
			break;
		}

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data) {
			err = -ENOMEM;
			break;
		}

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status ret = acpi_load_table(data, NULL);

			if (ret)
				pr_err("failed to load table: %u\n", ret);
			else
				continue;	/* table is in use, don't free it */
		} else {
			pr_err("failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}

	kfree(name);
	return err;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

static void refresh_nv_rng_seed(struct work_struct *work)
{
	u8 seed[EFI_RANDOM_SEED_SIZE];

	get_random_bytes(seed, sizeof(seed));
	efi.set_variable(L"RandomSeed", &LINUX_EFI_RANDOM_SEED_TABLE_GUID,
			 EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
			 EFI_VARIABLE_RUNTIME_ACCESS, sizeof(seed), seed);
	memzero_explicit(seed, sizeof(seed));
}
static int refresh_nv_rng_seed_notification(struct notifier_block *nb, unsigned long action, void *data)
{
	static DECLARE_WORK(work, refresh_nv_rng_seed);
	schedule_work(&work);
	return NOTIFY_DONE;
}
static struct notifier_block refresh_nv_rng_seed_nb = { .notifier_call = refresh_nv_rng_seed_notification };

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		destroy_workqueue(efi_rts_wq);
		return -ENOMEM;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		efivar_ssdt_load();
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	execute_with_initialized_rng(&refresh_nv_rng_seed_nb);
	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
	destroy_workqueue(efi_rts_wq);
	return error;
}

subsys_initcall(efisubsys_init);

void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}
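
/*
 * Background note (general memblock behaviour, not specific to this file):
 * ranges passed to memblock_mark_mirror() above are preferred when memblock
 * allocates memory for the kernel, and with "kernelcore=mirror" on the
 * command line the non-mirrored memory is treated as movable, so kernel
 * data structures stay within the more reliable, mirrored ranges.
 */
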
/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
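
/*
 * Minimal usage sketch for efi_mem_desc_lookup() above (illustrative only;
 * phys_addr is a caller-supplied physical address):
 *
 *	efi_memory_desc_t md;
 *
 *	if (!efi_mem_desc_lookup(phys_addr, &md))
 *		pr_info("0x%llx lies in a %llu-page EFI region at 0x%llx\n",
 *			phys_addr, md.num_pages, md.phys_addr);
 */
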
/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it from being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}

static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
	{ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
	{SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
	{SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
	{EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
	{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
	{LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
	{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
	{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
	{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (!efi_guidcmp(*guid, table_types[i].guid)) {
			*(table_types[i].ptr) = table;
			if (table_types[i].name[0])
				pr_cont("%s=0x%lx ",
					table_types[i].name, table);
			return 1;
		}
	}

	return 0;
}
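
/*
 * Illustrative sketch (not part of this file): architectures can pass their
 * own table list as the arch_tables argument of efi_config_parse_tables()
 * below, using the same sentinel-terminated layout as common_tables[]:
 *
 *	static const efi_config_table_type_t arch_tables[] __initconst = {
 *		{MY_VENDOR_TABLE_GUID, &my_table_phys, "MYTBL" },
 *		{},
 *	};
 *
 * MY_VENDOR_TABLE_GUID and my_table_phys are hypothetical names; entries are
 * matched by GUID and the table's physical address is stored through .ptr.
 */
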
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
				   int min_major_version)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	if ((systab_hdr->revision >> 16) < min_major_version)
		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
		       systab_hdr->revision >> 16,
		       systab_hdr->revision & 0xffff,
		       min_major_version);

	return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,
		vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}
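
/*
 * Worked example (illustrative): the revision checks above encode the UEFI
 * version as (major << 16) | minor, so a systab_hdr->revision of 0x0002001e
 * is printed by efi_systab_report_header() as "EFI v2.30", and
 * efi_systab_check_header() only warns, without failing, when the major
 * version is below min_major_version.
 */
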
"Loader Data", 789 "Boot Code", 790 "Boot Data", 791 "Runtime Code", 792 "Runtime Data", 793 "Conventional", 794 "Unusable", 795 "ACPI Reclaim", 796 "ACPI Mem NVS", 797 "MMIO", 798 "MMIO Port", 799 "PAL Code", 800 "Persistent", 801 }; 802 803 char * __init efi_md_typeattr_format(char *buf, size_t size, 804 const efi_memory_desc_t *md) 805 { 806 char *pos; 807 int type_len; 808 u64 attr; 809 810 pos = buf; 811 if (md->type >= ARRAY_SIZE(memory_type_name)) 812 type_len = snprintf(pos, size, "[type=%u", md->type); 813 else 814 type_len = snprintf(pos, size, "[%-*s", 815 (int)(sizeof(memory_type_name[0]) - 1), 816 memory_type_name[md->type]); 817 if (type_len >= size) 818 return buf; 819 820 pos += type_len; 821 size -= type_len; 822 823 attr = md->attribute; 824 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT | 825 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO | 826 EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP | 827 EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO | 828 EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE)) 829 snprintf(pos, size, "|attr=0x%016llx]", 830 (unsigned long long)attr); 831 else 832 snprintf(pos, size, 833 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", 834 attr & EFI_MEMORY_RUNTIME ? "RUN" : "", 835 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "", 836 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "", 837 attr & EFI_MEMORY_SP ? "SP" : "", 838 attr & EFI_MEMORY_NV ? "NV" : "", 839 attr & EFI_MEMORY_XP ? "XP" : "", 840 attr & EFI_MEMORY_RP ? "RP" : "", 841 attr & EFI_MEMORY_WP ? "WP" : "", 842 attr & EFI_MEMORY_RO ? "RO" : "", 843 attr & EFI_MEMORY_UCE ? "UCE" : "", 844 attr & EFI_MEMORY_WB ? "WB" : "", 845 attr & EFI_MEMORY_WT ? "WT" : "", 846 attr & EFI_MEMORY_WC ? "WC" : "", 847 attr & EFI_MEMORY_UC ? "UC" : ""); 848 return buf; 849 } 850 851 /* 852 * IA64 has a funky EFI memory map that doesn't work the same way as 853 * other architectures. 854 */ 855 #ifndef CONFIG_IA64 856 /* 857 * efi_mem_attributes - lookup memmap attributes for physical address 858 * @phys_addr: the physical address to lookup 859 * 860 * Search in the EFI memory map for the region covering 861 * @phys_addr. Returns the EFI memory attributes if the region 862 * was found in the memory map, 0 otherwise. 863 */ 864 u64 efi_mem_attributes(unsigned long phys_addr) 865 { 866 efi_memory_desc_t *md; 867 868 if (!efi_enabled(EFI_MEMMAP)) 869 return 0; 870 871 for_each_efi_memory_desc(md) { 872 if ((md->phys_addr <= phys_addr) && 873 (phys_addr < (md->phys_addr + 874 (md->num_pages << EFI_PAGE_SHIFT)))) 875 return md->attribute; 876 } 877 return 0; 878 } 879 880 /* 881 * efi_mem_type - lookup memmap type for physical address 882 * @phys_addr: the physical address to lookup 883 * 884 * Search in the EFI memory map for the region covering @phys_addr. 885 * Returns the EFI memory type if the region was found in the memory 886 * map, -EINVAL otherwise. 
/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
#endif

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);
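
/*
 * Usage sketch (illustrative): callers typically convert the raw
 * efi_status_t returned by a runtime service into an errno with the helper
 * above:
 *
 *	status = efi.get_variable(name, &vendor, &attr, &size, data);
 *	if (status != EFI_SUCCESS)
 *		return efi_status_to_err(status);
 */
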
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif