// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
        .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
        .acpi = EFI_INVALID_TABLE_ADDR,
        .acpi20 = EFI_INVALID_TABLE_ADDR,
        .smbios = EFI_INVALID_TABLE_ADDR,
        .smbios3 = EFI_INVALID_TABLE_ADDR,
        .esrt = EFI_INVALID_TABLE_ADDR,
        .tpm_log = EFI_INVALID_TABLE_ADDR,
        .tpm_final_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
        .mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
        .coco_secret = EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

extern unsigned long screen_info_table;

struct mm_struct efi_mm = {
        .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
        .mm_users = ATOMIC_INIT(2),
        .mm_count = ATOMIC_INIT(1),
        .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
        MMAP_LOCK_INITIALIZER(efi_mm)
        .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
        .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
        .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
        disable_runtime = true;
        return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
        return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
        return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

static int __init parse_efi_cmdline(char *str)
{
        if (!str) {
                pr_warn("need at least one option\n");
                return -EINVAL;
        }

        if (parse_option_str(str, "debug"))
                set_bit(EFI_DBG, &efi.flags);

        if (parse_option_str(str, "noruntime"))
                disable_runtime = true;

        if (parse_option_str(str, "runtime"))
                disable_runtime = false;

        if (parse_option_str(str, "nosoftreserve"))
                set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

        return 0;
}
early_param("efi", parse_efi_cmdline);
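
/*
 * Example (illustrative only): booting with "efi=debug,nosoftreserve" on the
 * kernel command line sets EFI_DBG and EFI_MEM_NO_SOFT_RESERVE in efi.flags,
 * while "noefi" or "efi=noruntime" keeps the kernel from using the firmware's
 * runtime services, exactly as the parsers above implement.
 */
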
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note: do not add more fields to the systab sysfs file, as that breaks the
 * sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        char *str = buf;

        if (!kobj || !buf)
                return -EINVAL;

        if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
        if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
        /*
         * If both SMBIOS and SMBIOS3 entry points are implemented, the
         * SMBIOS3 entry point shall be preferred, so we list it first to
         * let applications stop parsing after the first match.
         */
        if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
        if (efi.smbios != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

        if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
                str = efi_systab_show_arch(str);

        return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
        __ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
        &efi_attr_systab.attr,
        &efi_attr_fw_platform_size.attr,
        &efi_attr_fw_vendor.attr,
        &efi_attr_runtime.attr,
        &efi_attr_config_table.attr,
        NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
                                   int n)
{
        return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
        .attrs = efi_subsys_attrs,
        .is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static bool generic_ops_supported(void)
{
        unsigned long name_size;
        efi_status_t status;
        efi_char16_t name;
        efi_guid_t guid;

        name_size = sizeof(name);

        status = efi.get_next_variable(&name_size, &name, &guid);
        if (status == EFI_UNSUPPORTED)
                return false;

        return true;
}

static int generic_ops_register(void)
{
        if (!generic_ops_supported())
                return 0;

        generic_ops.get_variable = efi.get_variable;
        generic_ops.get_next_variable = efi.get_next_variable;
        generic_ops.query_variable_store = efi_query_variable_store;

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
                generic_ops.set_variable = efi.set_variable;
                generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
        }
        return efivars_register(&generic_efivars, &generic_ops);
}

static void generic_ops_unregister(void)
{
        if (!generic_ops.get_variable)
                return;

        efivars_unregister(&generic_efivars);
}
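
/*
 * A platform-specific efivars backend follows the same pattern as
 * generic_ops_register() above: fill in an efivar_operations struct and hand
 * it to efivars_register(). Hypothetical sketch (identifiers are
 * illustrative, not an existing driver):
 *
 *	static struct efivars my_efivars;
 *	static const struct efivar_operations my_ops = {
 *		.get_variable		= my_get_variable,
 *		.get_next_variable	= my_get_next_variable,
 *		.set_variable		= my_set_variable,
 *	};
 *
 *	return efivars_register(&my_efivars, &my_ops);
 */
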
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
        int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

        if (ret)
                return ret;

        if (strlen(str) < sizeof(efivar_ssdt))
                memcpy(efivar_ssdt, str, strlen(str));
        else
                pr_warn("efivar_ssdt: name too long: %s\n", str);
        return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_load(void)
{
        unsigned long name_size = 256;
        efi_char16_t *name = NULL;
        efi_status_t status;
        efi_guid_t guid;

        if (!efivar_ssdt[0])
                return 0;

        name = kzalloc(name_size, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        for (;;) {
                char utf8_name[EFIVAR_SSDT_NAME_MAX];
                unsigned long data_size = 0;
                void *data;
                int limit;

                status = efi.get_next_variable(&name_size, name, &guid);
                if (status == EFI_NOT_FOUND) {
                        break;
                } else if (status == EFI_BUFFER_TOO_SMALL) {
                        name = krealloc(name, name_size, GFP_KERNEL);
                        if (!name)
                                return -ENOMEM;
                        continue;
                }

                limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
                ucs2_as_utf8(utf8_name, name, limit - 1);
                if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
                        continue;

                pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

                status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
                if (status != EFI_BUFFER_TOO_SMALL || !data_size)
                        return -EIO;

                data = kmalloc(data_size, GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                status = efi.get_variable(name, &guid, NULL, &data_size, data);
                if (status == EFI_SUCCESS) {
                        acpi_status ret = acpi_load_table(data, NULL);
                        if (ret)
                                pr_err("failed to load table: %u\n", ret);
                        else
                                /* table now in use by ACPI - keep the buffer */
                                continue;
                } else {
                        pr_err("failed to get var data: 0x%lx\n", status);
                }
                kfree(data);
        }
        return 0;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif
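
/*
 * Example (illustrative only): booting with "efivar_ssdt=SSDTOVERLAY" makes
 * efivar_ssdt_load() walk all EFI variables and load any variable named
 * "SSDTOVERLAY" (regardless of vendor GUID) as an ACPI SSDT.
 */
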
#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
        struct dentry *efi_debugfs;
        efi_memory_desc_t *md;
        char name[32];
        int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
        int i = 0;

        efi_debugfs = debugfs_create_dir("efi", NULL);
        if (IS_ERR_OR_NULL(efi_debugfs))
                return;

        for_each_efi_memory_desc(md) {
                switch (md->type) {
                case EFI_BOOT_SERVICES_CODE:
                        snprintf(name, sizeof(name), "boot_services_code%d",
                                 type_count[md->type]++);
                        break;
                case EFI_BOOT_SERVICES_DATA:
                        snprintf(name, sizeof(name), "boot_services_data%d",
                                 type_count[md->type]++);
                        break;
                default:
                        continue;
                }

                if (i >= EFI_DEBUGFS_MAX_BLOBS) {
                        pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
                                EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
                        break;
                }

                debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
                debugfs_blob[i].data = memremap(md->phys_addr,
                                                debugfs_blob[i].size,
                                                MEMREMAP_WB);
                if (!debugfs_blob[i].data)
                        continue;

                debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
                i++;
        }
}
#else
static inline void efi_debugfs_init(void) {}
#endif

static void refresh_nv_rng_seed(struct work_struct *work)
{
        u8 seed[EFI_RANDOM_SEED_SIZE];

        get_random_bytes(seed, sizeof(seed));
        efi.set_variable(L"RandomSeed", &LINUX_EFI_RANDOM_SEED_TABLE_GUID,
                         EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
                         EFI_VARIABLE_RUNTIME_ACCESS, sizeof(seed), seed);
        memzero_explicit(seed, sizeof(seed));
}

static int refresh_nv_rng_seed_notification(struct notifier_block *nb, unsigned long action, void *data)
{
        static DECLARE_WORK(work, refresh_nv_rng_seed);

        schedule_work(&work);
        return NOTIFY_DONE;
}
static struct notifier_block refresh_nv_rng_seed_nb = { .notifier_call = refresh_nv_rng_seed_notification };

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
        int error;

        if (!efi_enabled(EFI_RUNTIME_SERVICES))
                efi.runtime_supported_mask = 0;

        if (!efi_enabled(EFI_BOOT))
                return 0;

        if (efi.runtime_supported_mask) {
                /*
                 * Since we process only one efi_runtime_service() at a time, an
                 * ordered workqueue (which creates only one execution context)
                 * should suffice for all our needs.
                 */
                efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
                if (!efi_rts_wq) {
                        pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
                        clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                        efi.runtime_supported_mask = 0;
                        return 0;
                }
        }

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
                platform_device_register_simple("rtc-efi", 0, NULL, 0);

        /* We register the efi directory at /sys/firmware/efi */
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
                pr_err("efi: Firmware registration failed.\n");
                error = -ENOMEM;
                goto err_destroy_wq;
        }

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
                                      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
                error = generic_ops_register();
                if (error)
                        goto err_put;
                efivar_ssdt_load();
                platform_device_register_simple("efivars", 0, NULL, 0);
        }

        error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
        if (error) {
                pr_err("efi: Sysfs attribute export failed with error %d.\n",
                       error);
                goto err_unregister;
        }

        /* and the standard mountpoint for efivarfs */
        error = sysfs_create_mount_point(efi_kobj, "efivars");
        if (error) {
                pr_err("efivars: Subsystem registration failed.\n");
                goto err_remove_group;
        }

        if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
                efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
        if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
                platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
                execute_with_initialized_rng(&refresh_nv_rng_seed_nb);

        return 0;

err_remove_group:
        sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
        if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
                                      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
                generic_ops_unregister();
err_put:
        kobject_put(efi_kobj);
        efi_kobj = NULL;
err_destroy_wq:
        if (efi_rts_wq)
                destroy_workqueue(efi_rts_wq);

        return error;
}

subsys_initcall(efisubsys_init);
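
/*
 * Rough picture of what efisubsys_init() exports on a typical 64-bit EFI
 * boot (the exact set of entries depends on the architecture and on which
 * weak attributes are provided):
 *
 *	/sys/firmware/efi/systab
 *	/sys/firmware/efi/fw_platform_size	-> "64"
 *	/sys/firmware/efi/efivars		(efivarfs mount point)
 */
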
void __init efi_find_mirror(void)
{
        efi_memory_desc_t *md;
        u64 mirror_size = 0, total_size = 0;

        if (!efi_enabled(EFI_MEMMAP))
                return;

        for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

                total_size += size;
                if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
                        memblock_mark_mirror(start, size);
                        mirror_size += size;
                }
        }
        if (mirror_size)
                pr_info("Memory: %lldM/%lldM mirrored memory\n",
                        mirror_size>>20, total_size>>20);
}

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP)) {
                pr_err_once("EFI_MEMMAP is not enabled.\n");
                return -EINVAL;
        }

        if (!out_md) {
                pr_err_once("out_md is null.\n");
                return -EINVAL;
        }

        for_each_efi_memory_desc(md) {
                u64 size;
                u64 end;

                /* skip bogus entries (including empty ones) */
                if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
                    (md->num_pages <= 0) ||
                    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
                        continue;

                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
                if (phys_addr >= md->phys_addr && phys_addr < end) {
                        memcpy(out_md, md, sizeof(*out_md));
                        return 0;
                }
        }
        return -ENOENT;
}

extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
        __weak __alias(__efi_mem_desc_lookup);

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
        u64 size = md->num_pages << EFI_PAGE_SHIFT;
        u64 end = md->phys_addr + size;
        return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it from being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
        /* efi_mem_reserve() does not work under Xen */
        if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
                return;

        if (!memblock_is_region_reserved(addr, size))
                memblock_reserve(addr, size);

        /*
         * Some architectures (x86) reserve all boot services ranges
         * until efi_free_boot_services() because of buggy firmware
         * implementations. This means the above memblock_reserve() is
         * superfluous on x86 and instead what it needs to do is
         * ensure the @start, @size is not freed.
         */
        efi_arch_mem_reserve(addr, size);
}
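
/*
 * Typical (illustrative) caller: early init code that has located a firmware
 * table via an EFI configuration table entry keeps it from being recycled by
 * efi_free_boot_services() with something like:
 *
 *	if (table_phys != EFI_INVALID_TABLE_ADDR)
 *		efi_mem_reserve(table_phys, table_size);
 *
 * where table_phys/table_size are whatever the caller parsed out of the
 * configuration table (efi_esrt_init() is the in-tree example).
 */
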
static const efi_config_table_type_t common_tables[] __initconst = {
        {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0"},
        {ACPI_TABLE_GUID, &efi.acpi, "ACPI"},
        {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS"},
        {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0"},
        {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT"},
        {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR"},
        {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG"},
        {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog"},
        {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog"},
        {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE"},
        {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD"},
        {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP"},
#ifdef CONFIG_EFI_RCI2_TABLE
        {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
        {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar"},
#endif
#ifdef CONFIG_EFI_COCO_SECRET
        {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret"},
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
        {LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table},
#endif
        {},
};

static __init int match_config_table(const efi_guid_t *guid,
                                     unsigned long table,
                                     const efi_config_table_type_t *table_types)
{
        int i;

        for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
                if (efi_guidcmp(*guid, table_types[i].guid))
                        continue;

                if (!efi_config_table_is_usable(guid, table)) {
                        if (table_types[i].name[0])
                                pr_cont("(%s=0x%lx unusable) ",
                                        table_types[i].name, table);
                        return 1;
                }

                *(table_types[i].ptr) = table;
                if (table_types[i].name[0])
                        pr_cont("%s=0x%lx ", table_types[i].name, table);
                return 1;
        }

        return 0;
}
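
/*
 * Architectures can extend the table above by passing their own list as the
 * arch_tables argument of efi_config_parse_tables(). A minimal sketch
 * (identifiers are illustrative, not an existing arch table):
 *
 *	static const efi_config_table_type_t arch_tables[] __initconst = {
 *		{MY_VENDOR_TABLE_GUID, &my_table_phys, "MYTABLE"},
 *		{},
 *	};
 *
 * The empty terminating entry is required; matching stops at NULL_GUID.
 */
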
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
                                   int count,
                                   const efi_config_table_type_t *arch_tables)
{
        const efi_config_table_64_t *tbl64 = (void *)config_tables;
        const efi_config_table_32_t *tbl32 = (void *)config_tables;
        const efi_guid_t *guid;
        unsigned long table;
        int i;

        pr_info("");
        for (i = 0; i < count; i++) {
                if (!IS_ENABLED(CONFIG_X86)) {
                        guid = &config_tables[i].guid;
                        table = (unsigned long)config_tables[i].table;
                } else if (efi_enabled(EFI_64BIT)) {
                        guid = &tbl64[i].guid;
                        table = tbl64[i].table;

                        if (IS_ENABLED(CONFIG_X86_32) &&
                            tbl64[i].table > U32_MAX) {
                                pr_cont("\n");
                                pr_err("Table located above 4GB, disabling EFI.\n");
                                return -EINVAL;
                        }
                } else {
                        guid = &tbl32[i].guid;
                        table = tbl32[i].table;
                }

                if (!match_config_table(guid, table, common_tables) && arch_tables)
                        match_config_table(guid, table, arch_tables);
        }
        pr_cont("\n");
        set_bit(EFI_CONFIG_TABLES, &efi.flags);

        if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
                struct linux_efi_random_seed *seed;
                u32 size = 0;

                seed = early_memremap(efi_rng_seed, sizeof(*seed));
                if (seed != NULL) {
                        size = min_t(u32, seed->size, SZ_1K); /* sanity check */
                        early_memunmap(seed, sizeof(*seed));
                } else {
                        pr_err("Could not map UEFI random seed!\n");
                }
                if (size > 0) {
                        seed = early_memremap(efi_rng_seed,
                                              sizeof(*seed) + size);
                        if (seed != NULL) {
                                add_bootloader_randomness(seed->bits, size);
                                memzero_explicit(seed->bits, size);
                                early_memunmap(seed, sizeof(*seed) + size);
                        } else {
                                pr_err("Could not map UEFI random seed!\n");
                        }
                }
        }

        if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
                efi_memattr_init();

        efi_tpm_eventlog_init();

        if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
                unsigned long prsv = mem_reserve;

                while (prsv) {
                        struct linux_efi_memreserve *rsv;
                        u8 *p;

                        /*
                         * Just map a full page: that is what we will get
                         * anyway, and it permits us to map the entire entry
                         * before knowing its size.
                         */
                        p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
                                           PAGE_SIZE);
                        if (p == NULL) {
                                pr_err("Could not map UEFI memreserve entry!\n");
                                return -ENOMEM;
                        }

                        rsv = (void *)(p + prsv % PAGE_SIZE);

                        /* reserve the entry itself */
                        memblock_reserve(prsv,
                                         struct_size(rsv, entry, rsv->size));

                        for (i = 0; i < atomic_read(&rsv->count); i++) {
                                memblock_reserve(rsv->entry[i].base,
                                                 rsv->entry[i].size);
                        }

                        prsv = rsv->next;
                        early_memunmap(p, PAGE_SIZE);
                }
        }

        if (rt_prop != EFI_INVALID_TABLE_ADDR) {
                efi_rt_properties_table_t *tbl;

                tbl = early_memremap(rt_prop, sizeof(*tbl));
                if (tbl) {
                        efi.runtime_supported_mask &= tbl->runtime_services_supported;
                        early_memunmap(tbl, sizeof(*tbl));
                }
        }

        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
            initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
                struct linux_efi_initrd *tbl;

                tbl = early_memremap(initrd, sizeof(*tbl));
                if (tbl) {
                        phys_initrd_start = tbl->base;
                        phys_initrd_size = tbl->size;
                        early_memunmap(tbl, sizeof(*tbl));
                }
        }

        return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
        if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
                pr_err("System table signature incorrect!\n");
                return -EINVAL;
        }

        return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
                                                size_t size)
{
        const efi_char16_t *ret;

        ret = early_memremap_ro(fw_vendor, size);
        if (!ret)
                pr_err("Could not map the firmware vendor!\n");
        return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
        early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
                                     unsigned long fw_vendor)
{
        char vendor[100] = "unknown";
        const efi_char16_t *c16;
        size_t i;
        u16 rev;

        c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
        if (c16) {
                for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
                        vendor[i] = c16[i];
                vendor[i] = '\0';

                unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
        }

        rev = (u16)systab_hdr->revision;
        pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

        rev %= 10;
        if (rev)
                pr_cont(".%u", rev);

        pr_cont(" by %s\n", vendor);

        if (IS_ENABLED(CONFIG_X86_64) &&
            systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
            !strcmp(vendor, "Apple")) {
                pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
                efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
        }
}

/*
 * Example of the decoding above: a system table revision of (2 << 16) | 70
 * prints as "EFI v2.7", and (2 << 16) | 31 prints as "EFI v2.3.1" (the low
 * 16 bits encode minor * 10 + sub-minor).
 */

static __initdata char memory_type_name[][13] = {
        "Reserved",
        "Loader Code",
        "Loader Data",
        "Boot Code",
        "Boot Data",
        "Runtime Code",
        "Runtime Data",
        "Conventional",
        "Unusable",
        "ACPI Reclaim",
        "ACPI Mem NVS",
        "MMIO",
        "MMIO Port",
        "PAL Code",
        "Persistent",
};

char * __init efi_md_typeattr_format(char *buf, size_t size,
                                     const efi_memory_desc_t *md)
{
        char *pos;
        int type_len;
        u64 attr;

        pos = buf;
        if (md->type >= ARRAY_SIZE(memory_type_name))
                type_len = snprintf(pos, size, "[type=%u", md->type);
        else
                type_len = snprintf(pos, size, "[%-*s",
                                    (int)(sizeof(memory_type_name[0]) - 1),
                                    memory_type_name[md->type]);
        if (type_len >= size)
                return buf;

        pos += type_len;
        size -= type_len;

        attr = md->attribute;
        if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
                     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
                     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
                     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
                     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
                snprintf(pos, size, "|attr=0x%016llx]",
                         (unsigned long long)attr);
        else
                snprintf(pos, size,
                         "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
                         attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
                         attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
                         attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
                         attr & EFI_MEMORY_SP ? "SP" : "",
                         attr & EFI_MEMORY_NV ? "NV" : "",
                         attr & EFI_MEMORY_XP ? "XP" : "",
                         attr & EFI_MEMORY_RP ? "RP" : "",
                         attr & EFI_MEMORY_WP ? "WP" : "",
                         attr & EFI_MEMORY_RO ? "RO" : "",
                         attr & EFI_MEMORY_UCE ? "UCE" : "",
                         attr & EFI_MEMORY_WB ? "WB" : "",
                         attr & EFI_MEMORY_WT ? "WT" : "",
                         attr & EFI_MEMORY_WC ? "WC" : "",
                         attr & EFI_MEMORY_UC ? "UC" : "");
        return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return 0;

        for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                                  (md->num_pages << EFI_PAGE_SHIFT))))
                        return md->attribute;
        }
        return 0;
}

/*
 * Illustrative use (not taken from a specific caller): mapping code can check
 * the returned attributes before picking a memory type, e.g. only use a
 * cacheable mapping if efi_mem_attributes(paddr) & EFI_MEMORY_WB is set.
 */

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL if it was not, or -ENOTSUPP if there is no EFI memory map.
 */
int efi_mem_type(unsigned long phys_addr)
{
        const efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return -ENOTSUPP;

        for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                                  (md->num_pages << EFI_PAGE_SHIFT))))
                        return md->type;
        }
        return -EINVAL;
}
#endif

int efi_status_to_err(efi_status_t status)
{
        int err;

        switch (status) {
        case EFI_SUCCESS:
                err = 0;
                break;
        case EFI_INVALID_PARAMETER:
                err = -EINVAL;
                break;
        case EFI_OUT_OF_RESOURCES:
                err = -ENOSPC;
                break;
        case EFI_DEVICE_ERROR:
                err = -EIO;
                break;
        case EFI_WRITE_PROTECTED:
                err = -EROFS;
                break;
        case EFI_SECURITY_VIOLATION:
                err = -EACCES;
                break;
        case EFI_NOT_FOUND:
                err = -ENOENT;
                break;
        case EFI_ABORTED:
                err = -EINTR;
                break;
        default:
                err = -EINVAL;
        }

        return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);
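
/*
 * Typical (illustrative) use: callers of the runtime services convert the
 * firmware status into a normal errno before returning it, e.g.
 *
 *	status = efi.get_variable(name, &guid, &attr, &size, data);
 *	if (status != EFI_SUCCESS)
 *		return efi_status_to_err(status);
 */
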
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
        if (mem_reserve == EFI_INVALID_TABLE_ADDR)
                return -ENODEV;

        efi_memreserve_root = memremap(mem_reserve,
                                       sizeof(*efi_memreserve_root),
                                       MEMREMAP_WB);
        if (WARN_ON_ONCE(!efi_memreserve_root))
                return -ENOMEM;
        return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
        struct resource *res, *parent;
        int ret;

        res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
        if (!res)
                return -ENOMEM;

        res->name = "reserved";
        res->flags = IORESOURCE_MEM;
        res->start = addr;
        res->end = addr + size - 1;

        /* we expect a conflict with a 'System RAM' region */
        parent = request_resource_conflict(&iomem_resource, res);
        ret = parent ? request_resource(parent, res) : 0;

        /*
         * Given that efi_mem_reserve_iomem() can be called at any
         * time, only call memblock_reserve() if the architecture
         * keeps the infrastructure around.
         */
        if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
                memblock_reserve(addr, size);

        return ret;
}

int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
        struct linux_efi_memreserve *rsv;
        unsigned long prsv;
        int rc, index;

        if (efi_memreserve_root == (void *)ULONG_MAX)
                return -ENODEV;

        if (!efi_memreserve_root) {
                rc = efi_memreserve_map_root();
                if (rc)
                        return rc;
        }

        /* first try to find a slot in an existing linked list entry */
        for (prsv = efi_memreserve_root->next; prsv; ) {
                rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
                if (!rsv)
                        return -ENOMEM;
                index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
                if (index < rsv->size) {
                        rsv->entry[index].base = addr;
                        rsv->entry[index].size = size;

                        memunmap(rsv);
                        return efi_mem_reserve_iomem(addr, size);
                }
                prsv = rsv->next;
                memunmap(rsv);
        }

        /* no slot found - allocate a new linked list entry */
        rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
        if (!rsv)
                return -ENOMEM;

        rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
        if (rc) {
                free_page((unsigned long)rsv);
                return rc;
        }

        /*
         * The memremap() call above assumes that a linux_efi_memreserve entry
         * never crosses a page boundary, so let's ensure that this remains true
         * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
         * using SZ_4K explicitly in the size calculation below.
         */
        rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
        atomic_set(&rsv->count, 1);
        rsv->entry[0].base = addr;
        rsv->entry[0].size = size;

        spin_lock(&efi_mem_reserve_persistent_lock);
        rsv->next = efi_memreserve_root->next;
        efi_memreserve_root->next = __pa(rsv);
        spin_unlock(&efi_mem_reserve_persistent_lock);

        return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
        if (efi_memreserve_root)
                return 0;
        if (efi_memreserve_map_root())
                efi_memreserve_root = (void *)ULONG_MAX;
        return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
                                  unsigned long code, void *unused)
{
        struct linux_efi_random_seed *seed;
        u32 size = 0;

        if (!kexec_in_progress)
                return NOTIFY_DONE;

        seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
        if (seed != NULL) {
                size = min(seed->size, EFI_RANDOM_SEED_SIZE);
                memunmap(seed);
        } else {
                pr_err("Could not map UEFI random seed!\n");
        }
        if (size > 0) {
                seed = memremap(efi_rng_seed, sizeof(*seed) + size,
                                MEMREMAP_WB);
                if (seed != NULL) {
                        seed->size = size;
                        get_random_bytes(seed->bits, seed->size);
                        memunmap(seed);
                } else {
                        pr_err("Could not map UEFI random seed!\n");
                }
        }
        return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
        .notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
        if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
                return 0;
        return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif