// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi = EFI_INVALID_TABLE_ADDR,
	.acpi20 = EFI_INVALID_TABLE_ADDR,
	.smbios = EFI_INVALID_TABLE_ADDR,
	.smbios3 = EFI_INVALID_TABLE_ADDR,
	.esrt = EFI_INVALID_TABLE_ADDR,
	.tpm_log = EFI_INVALID_TABLE_ADDR,
	.tpm_final_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret = EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

extern unsigned long screen_info_table;

struct mm_struct efi_mm = {
	.mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users = ATOMIC_INIT(2),
	.mm_count = ATOMIC_INIT(1),
	.write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);
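
/*
 * Example (illustrative): parse_option_str() matches each sub-option
 * independently, so booting with
 *
 *	efi=debug,nosoftreserve,noruntime
 *
 * sets EFI_DBG and EFI_MEM_NO_SOFT_RESERVE and disables runtime services,
 * just as the separate "noefi" parameter above does.
 */
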
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note, do not add more fields to the systab sysfs file as that breaks the
 * sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static bool generic_ops_supported(void)
{
	unsigned long name_size;
	efi_status_t status;
	efi_char16_t name;
	efi_guid_t guid;

	name_size = sizeof(name);

	status = efi.get_next_variable(&name_size, &name, &guid);
	if (status == EFI_UNSUPPORTED)
		return false;

	return true;
}

static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}

static void generic_ops_unregister(void)
{
	if (!generic_ops.get_variable)
		return;

	efivars_unregister(&generic_efivars);
}
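
/*
 * Example (illustrative): once generic_ops_register() has succeeded, the
 * efivars core exposes these ops to efivarfs, so userspace can reach UEFI
 * variables through the mount point created in efisubsys_init() below:
 *
 *	mount -t efivarfs none /sys/firmware/efi/efivars
 */
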
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			name = krealloc(name, name_size, GFP_KERNEL);
			if (!name)
				return -ENOMEM;
			continue;
		}

		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size)
			return -EIO;

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status ret = acpi_load_table(data, NULL);
			if (ret)
				pr_err("failed to load table: %u\n", ret);
			else
				continue;
		} else {
			pr_err("failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}
	return 0;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif
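
/*
 * Note (illustrative): with debugfs mounted in its usual location, the blobs
 * created above show up as e.g. /sys/kernel/debug/efi/boot_services_code0
 * and /sys/kernel/debug/efi/boot_services_data0. They are only created when
 * the kernel was booted with efi=debug and the architecture preserved the
 * boot services regions (see the EFI_PRESERVE_BS_REGIONS check in
 * efisubsys_init() below).
 */
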
/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		efivar_ssdt_load();
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);

void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}
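
/*
 * Note (assumption): efi_find_mirror() above only manipulates memblock, so
 * it is expected to be called early from architecture setup code, before the
 * memblock data may be discarded and before the page allocator is up.
 */
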
/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		/* skip bogus entries (including empty ones) */
		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
		    (md->num_pages <= 0) ||
		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	__weak __alias(__efi_mem_desc_lookup);

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
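
/*
 * Note (assumption): architectures that need more than a plain memblock
 * reservation, such as x86, are expected to override the weak
 * efi_arch_mem_reserve() stub above so that the region also survives
 * efi_free_boot_services(); the generic stub intentionally does nothing.
 */
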
/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure that @addr, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}

static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
	{ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
	{SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
	{SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
	{EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
	{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
	{LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
	{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
	{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
	{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (efi_guidcmp(*guid, table_types[i].guid))
			continue;

		if (!efi_config_table_is_usable(guid, table)) {
			if (table_types[i].name[0])
				pr_cont("(%s=0x%lx unusable) ",
					table_types[i].name, table);
			return 1;
		}

		*(table_types[i].ptr) = table;
		if (table_types[i].name[0])
			pr_cont("%s=0x%lx ", table_types[i].name, table);
		return 1;
	}

	return 0;
}
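
/*
 * Example (illustrative, addresses invented for the example): together with
 * the pr_info("")/pr_cont() calls in efi_config_parse_tables() below, a
 * successful match produces a single boot log line along the lines of
 *
 *	efi: ACPI 2.0=0x7ff80000 SMBIOS 3.0=0x7ffa0000 MEMATTR=0x7eb0a018 ...
 */
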
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}
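
/*
 * Worked example: efi_systab_report_header() above treats the upper 16 bits
 * of the revision as the major number and the lower 16 bits as the minor
 * number scaled by ten, so a revision of ((2 << 16) | 70) is reported as
 * "EFI v2.7" and ((2 << 16) | 31) as "EFI v2.3.1".
 */
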
"Loader Data", 813 "Boot Code", 814 "Boot Data", 815 "Runtime Code", 816 "Runtime Data", 817 "Conventional", 818 "Unusable", 819 "ACPI Reclaim", 820 "ACPI Mem NVS", 821 "MMIO", 822 "MMIO Port", 823 "PAL Code", 824 "Persistent", 825 }; 826 827 char * __init efi_md_typeattr_format(char *buf, size_t size, 828 const efi_memory_desc_t *md) 829 { 830 char *pos; 831 int type_len; 832 u64 attr; 833 834 pos = buf; 835 if (md->type >= ARRAY_SIZE(memory_type_name)) 836 type_len = snprintf(pos, size, "[type=%u", md->type); 837 else 838 type_len = snprintf(pos, size, "[%-*s", 839 (int)(sizeof(memory_type_name[0]) - 1), 840 memory_type_name[md->type]); 841 if (type_len >= size) 842 return buf; 843 844 pos += type_len; 845 size -= type_len; 846 847 attr = md->attribute; 848 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT | 849 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO | 850 EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP | 851 EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO | 852 EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE)) 853 snprintf(pos, size, "|attr=0x%016llx]", 854 (unsigned long long)attr); 855 else 856 snprintf(pos, size, 857 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", 858 attr & EFI_MEMORY_RUNTIME ? "RUN" : "", 859 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "", 860 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "", 861 attr & EFI_MEMORY_SP ? "SP" : "", 862 attr & EFI_MEMORY_NV ? "NV" : "", 863 attr & EFI_MEMORY_XP ? "XP" : "", 864 attr & EFI_MEMORY_RP ? "RP" : "", 865 attr & EFI_MEMORY_WP ? "WP" : "", 866 attr & EFI_MEMORY_RO ? "RO" : "", 867 attr & EFI_MEMORY_UCE ? "UCE" : "", 868 attr & EFI_MEMORY_WB ? "WB" : "", 869 attr & EFI_MEMORY_WT ? "WT" : "", 870 attr & EFI_MEMORY_WC ? "WC" : "", 871 attr & EFI_MEMORY_UC ? "UC" : ""); 872 return buf; 873 } 874 875 /* 876 * IA64 has a funky EFI memory map that doesn't work the same way as 877 * other architectures. 878 */ 879 #ifndef CONFIG_IA64 880 /* 881 * efi_mem_attributes - lookup memmap attributes for physical address 882 * @phys_addr: the physical address to lookup 883 * 884 * Search in the EFI memory map for the region covering 885 * @phys_addr. Returns the EFI memory attributes if the region 886 * was found in the memory map, 0 otherwise. 887 */ 888 u64 efi_mem_attributes(unsigned long phys_addr) 889 { 890 efi_memory_desc_t *md; 891 892 if (!efi_enabled(EFI_MEMMAP)) 893 return 0; 894 895 for_each_efi_memory_desc(md) { 896 if ((md->phys_addr <= phys_addr) && 897 (phys_addr < (md->phys_addr + 898 (md->num_pages << EFI_PAGE_SHIFT)))) 899 return md->attribute; 900 } 901 return 0; 902 } 903 904 /* 905 * efi_mem_type - lookup memmap type for physical address 906 * @phys_addr: the physical address to lookup 907 * 908 * Search in the EFI memory map for the region covering @phys_addr. 909 * Returns the EFI memory type if the region was found in the memory 910 * map, -EINVAL otherwise. 
/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
#endif

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}
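
/*
 * Note: the table mapped above is the LINUX_EFI_MEMRESERVE table that
 * efi_config_parse_tables() walks at boot, which is what allows reservations
 * recorded via efi_mem_reserve_persistent() below to be honoured again by a
 * kexec'd kernel.
 */
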
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif