// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
        .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
        .acpi                   = EFI_INVALID_TABLE_ADDR,
        .acpi20                 = EFI_INVALID_TABLE_ADDR,
        .smbios                 = EFI_INVALID_TABLE_ADDR,
        .smbios3                = EFI_INVALID_TABLE_ADDR,
        .esrt                   = EFI_INVALID_TABLE_ADDR,
        .tpm_log                = EFI_INVALID_TABLE_ADDR,
        .tpm_final_log          = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;

struct mm_struct efi_mm = {
        .mm_rb                  = RB_ROOT,
        .mm_users               = ATOMIC_INIT(2),
        .mm_count               = ATOMIC_INIT(1),
        MMAP_LOCK_INITIALIZER(efi_mm)
        .page_table_lock        = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
        .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
        .cpu_bitmap             = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
        disable_runtime = true;
        return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
        return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
        return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

static int __init parse_efi_cmdline(char *str)
{
        if (!str) {
                pr_warn("need at least one option\n");
                return -EINVAL;
        }

        if (parse_option_str(str, "debug"))
                set_bit(EFI_DBG, &efi.flags);

        if (parse_option_str(str, "noruntime"))
                disable_runtime = true;

        if (parse_option_str(str, "nosoftreserve"))
                set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

        return 0;
}
early_param("efi", parse_efi_cmdline);
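
/*
 * Each option above is matched independently in the comma-separated
 * argument, so booting with e.g. "efi=debug,nosoftreserve" should set both
 * EFI_DBG and EFI_MEM_NO_SOFT_RESERVE at the same time.
 */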

struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        char *str = buf;

        if (!kobj || !buf)
                return -EINVAL;

        if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
        if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
        /*
         * If both SMBIOS and SMBIOS3 entry points are implemented, the
         * SMBIOS3 entry point shall be preferred, so we list it first to
         * let applications stop parsing after the first match.
         */
        if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
        if (efi.smbios != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

        if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
                str = efi_systab_show_arch(str);

        return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
        __ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
        &efi_attr_systab.attr,
        &efi_attr_fw_platform_size.attr,
        &efi_attr_fw_vendor.attr,
        &efi_attr_runtime.attr,
        &efi_attr_config_table.attr,
        NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
                                   int n)
{
        return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
        .attrs = efi_subsys_attrs,
        .is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static int generic_ops_register(void)
{
        generic_ops.get_variable = efi.get_variable;
        generic_ops.set_variable = efi.set_variable;
        generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
        generic_ops.get_next_variable = efi.get_next_variable;
        generic_ops.query_variable_store = efi_query_variable_store;

        return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

static void generic_ops_unregister(void)
{
        efivars_unregister(&generic_efivars);
}

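/*
 * With CONFIG_EFI_CUSTOM_SSDT_OVERLAYS, an ACPI SSDT can be loaded from an
 * EFI variable whose name is given on the kernel command line, e.g.
 * "efivar_ssdt=CUSTOM_SSDT" (the variable name here is just an example).
 * The matching variable's contents are handed to acpi_load_table() at boot.
 */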
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX    16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
        int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

        if (ret)
                return ret;

        if (strlen(str) < sizeof(efivar_ssdt))
                memcpy(efivar_ssdt, str, strlen(str));
        else
                pr_warn("efivar_ssdt: name too long: %s\n", str);
        return 0;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
                                   unsigned long name_size, void *data)
{
        struct efivar_entry *entry;
        struct list_head *list = data;
        char utf8_name[EFIVAR_SSDT_NAME_MAX];
        int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);

        ucs2_as_utf8(utf8_name, name, limit - 1);
        if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
                return 0;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return 0;

        memcpy(entry->var.VariableName, name, name_size);
        memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));

        efivar_entry_add(entry, list);

        return 0;
}

static __init int efivar_ssdt_load(void)
{
        LIST_HEAD(entries);
        struct efivar_entry *entry, *aux;
        unsigned long size;
        void *data;
        int ret;

        if (!efivar_ssdt[0])
                return 0;

        ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);

        list_for_each_entry_safe(entry, aux, &entries, list) {
                pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
                        &entry->var.VendorGuid);

                list_del(&entry->list);

                ret = efivar_entry_size(entry, &size);
                if (ret) {
                        pr_err("failed to get var size\n");
                        goto free_entry;
                }

                data = kmalloc(size, GFP_KERNEL);
                if (!data) {
                        ret = -ENOMEM;
                        goto free_entry;
                }

                ret = efivar_entry_get(entry, NULL, &size, data);
                if (ret) {
                        pr_err("failed to get var data\n");
                        goto free_data;
                }

                ret = acpi_load_table(data, NULL);
                if (ret) {
                        pr_err("failed to load table: %d\n", ret);
                        goto free_data;
                }

                goto free_entry;

free_data:
                kfree(data);

free_entry:
                kfree(entry);
        }

        return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
        struct dentry *efi_debugfs;
        efi_memory_desc_t *md;
        char name[32];
        int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
        int i = 0;

        efi_debugfs = debugfs_create_dir("efi", NULL);
        if (IS_ERR_OR_NULL(efi_debugfs))
                return;

        for_each_efi_memory_desc(md) {
                switch (md->type) {
                case EFI_BOOT_SERVICES_CODE:
                        snprintf(name, sizeof(name), "boot_services_code%d",
                                 type_count[md->type]++);
                        break;
                case EFI_BOOT_SERVICES_DATA:
                        snprintf(name, sizeof(name), "boot_services_data%d",
                                 type_count[md->type]++);
                        break;
                default:
                        continue;
                }

                if (i >= EFI_DEBUGFS_MAX_BLOBS) {
                        pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
                                EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
                        break;
                }

                debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
                debugfs_blob[i].data = memremap(md->phys_addr,
                                                debugfs_blob[i].size,
                                                MEMREMAP_WB);
                if (!debugfs_blob[i].data)
                        continue;

                debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
                i++;
        }
}
#else
static inline void efi_debugfs_init(void) {}
#endif

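/*
 * On an EFI-booted system, the initcall below typically ends up exposing
 * /sys/firmware/efi/ with the systab, fw_platform_size, fw_vendor, runtime
 * and config_table attributes (filtered by efi_attr_is_visible()), plus the
 * "efivars" mount point used by efivarfs.
 */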
/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
        int error;

        if (!efi_enabled(EFI_RUNTIME_SERVICES))
                efi.runtime_supported_mask = 0;

        if (!efi_enabled(EFI_BOOT))
                return 0;

        if (efi.runtime_supported_mask) {
                /*
                 * Since we process only one efi_runtime_service() at a time, an
                 * ordered workqueue (which creates only one execution context)
                 * should suffice for all our needs.
                 */
                efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
                if (!efi_rts_wq) {
                        pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
                        clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                        efi.runtime_supported_mask = 0;
                        return 0;
                }
        }

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
                platform_device_register_simple("rtc-efi", 0, NULL, 0);

        /* We register the efi directory at /sys/firmware/efi */
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
                pr_err("efi: Firmware registration failed.\n");
                return -ENOMEM;
        }

        if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
                efivar_ssdt_load();
                error = generic_ops_register();
                if (error)
                        goto err_put;
                platform_device_register_simple("efivars", 0, NULL, 0);
        }

        error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
        if (error) {
                pr_err("efi: Sysfs attribute export failed with error %d.\n",
                       error);
                goto err_unregister;
        }

        error = efi_runtime_map_init(efi_kobj);
        if (error)
                goto err_remove_group;

        /* and the standard mountpoint for efivarfs */
        error = sysfs_create_mount_point(efi_kobj, "efivars");
        if (error) {
                pr_err("efivars: Subsystem registration failed.\n");
                goto err_remove_group;
        }

        if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
                efi_debugfs_init();

        return 0;

err_remove_group:
        sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
        if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
                generic_ops_unregister();
err_put:
        kobject_put(efi_kobj);
        return error;
}

subsys_initcall(efisubsys_init);

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP)) {
                pr_err_once("EFI_MEMMAP is not enabled.\n");
                return -EINVAL;
        }

        if (!out_md) {
                pr_err_once("out_md is null.\n");
                return -EINVAL;
        }

        for_each_efi_memory_desc(md) {
                u64 size;
                u64 end;

                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
                if (phys_addr >= md->phys_addr && phys_addr < end) {
                        memcpy(out_md, md, sizeof(*out_md));
                        return 0;
                }
        }
        return -ENOENT;
}

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
        u64 size = md->num_pages << EFI_PAGE_SHIFT;
        u64 end = md->phys_addr + size;
        return end;
}

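/*
 * Default no-op; architectures such as x86 override this weak hook so that a
 * region passed to efi_mem_reserve() is also kept out of the ranges released
 * by efi_free_boot_services() (see the comment in efi_mem_reserve() below).
 */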
void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
        if (!memblock_is_region_reserved(addr, size))
                memblock_reserve(addr, size);

        /*
         * Some architectures (x86) reserve all boot services ranges
         * until efi_free_boot_services() because of buggy firmware
         * implementations. This means the above memblock_reserve() is
         * superfluous on x86 and instead what it needs to do is
         * ensure the @start, @size is not freed.
         */
        efi_arch_mem_reserve(addr, size);
}

static const efi_config_table_type_t common_tables[] __initconst = {
        {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
        {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
        {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
        {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
        {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
        {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
        {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
        {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
        {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
        {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
        {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
        {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
        {},
};

static __init int match_config_table(const efi_guid_t *guid,
                                     unsigned long table,
                                     const efi_config_table_type_t *table_types)
{
        int i;

        for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
                if (!efi_guidcmp(*guid, table_types[i].guid)) {
                        *(table_types[i].ptr) = table;
                        if (table_types[i].name[0])
                                pr_cont("%s=0x%lx ",
                                        table_types[i].name, table);
                        return 1;
                }
        }

        return 0;
}

int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
                                   int count,
                                   const efi_config_table_type_t *arch_tables)
{
        const efi_config_table_64_t *tbl64 = (void *)config_tables;
        const efi_config_table_32_t *tbl32 = (void *)config_tables;
        const efi_guid_t *guid;
        unsigned long table;
        int i;

        pr_info("");
        for (i = 0; i < count; i++) {
                if (!IS_ENABLED(CONFIG_X86)) {
                        guid = &config_tables[i].guid;
                        table = (unsigned long)config_tables[i].table;
                } else if (efi_enabled(EFI_64BIT)) {
                        guid = &tbl64[i].guid;
                        table = tbl64[i].table;

                        if (IS_ENABLED(CONFIG_X86_32) &&
                            tbl64[i].table > U32_MAX) {
                                pr_cont("\n");
                                pr_err("Table located above 4GB, disabling EFI.\n");
                                return -EINVAL;
                        }
                } else {
                        guid = &tbl32[i].guid;
                        table = tbl32[i].table;
                }

                if (!match_config_table(guid, table, common_tables) && arch_tables)
                        match_config_table(guid, table, arch_tables);
        }
        pr_cont("\n");
        set_bit(EFI_CONFIG_TABLES, &efi.flags);

        if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
                struct linux_efi_random_seed *seed;
                u32 size = 0;

                seed = early_memremap(efi_rng_seed, sizeof(*seed));
                if (seed != NULL) {
                        size = READ_ONCE(seed->size);
                        early_memunmap(seed, sizeof(*seed));
                } else {
                        pr_err("Could not map UEFI random seed!\n");
                }
                if (size > 0) {
                        seed = early_memremap(efi_rng_seed,
                                              sizeof(*seed) + size);
                        if (seed != NULL) {
                                pr_notice("seeding entropy pool\n");
                                add_bootloader_randomness(seed->bits, size);
                                early_memunmap(seed, sizeof(*seed) + size);
                        } else {
                                pr_err("Could not map UEFI random seed!\n");
                        }
                }
        }

        if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
                efi_memattr_init();

        efi_tpm_eventlog_init();

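        /*
         * The LINUX_EFI_MEMRESERVE table is a singly linked list of pages:
         * each node carries a small header (size/count/next) followed by an
         * array of { base, size } entries, and 'next' holds the physical
         * address of the following node (0 terminates the list).
         */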
        if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
                unsigned long prsv = mem_reserve;

                while (prsv) {
                        struct linux_efi_memreserve *rsv;
                        u8 *p;

                        /*
                         * Just map a full page: that is what we will get
                         * anyway, and it permits us to map the entire entry
                         * before knowing its size.
                         */
                        p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
                                           PAGE_SIZE);
                        if (p == NULL) {
                                pr_err("Could not map UEFI memreserve entry!\n");
                                return -ENOMEM;
                        }

                        rsv = (void *)(p + prsv % PAGE_SIZE);

                        /* reserve the entry itself */
                        memblock_reserve(prsv,
                                         struct_size(rsv, entry, rsv->size));

                        for (i = 0; i < atomic_read(&rsv->count); i++) {
                                memblock_reserve(rsv->entry[i].base,
                                                 rsv->entry[i].size);
                        }

                        prsv = rsv->next;
                        early_memunmap(p, PAGE_SIZE);
                }
        }

        if (rt_prop != EFI_INVALID_TABLE_ADDR) {
                efi_rt_properties_table_t *tbl;

                tbl = early_memremap(rt_prop, sizeof(*tbl));
                if (tbl) {
                        efi.runtime_supported_mask &= tbl->runtime_services_supported;
                        early_memunmap(tbl, sizeof(*tbl));
                }
        }

        return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
                                   int min_major_version)
{
        if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
                pr_err("System table signature incorrect!\n");
                return -EINVAL;
        }

        if ((systab_hdr->revision >> 16) < min_major_version)
                pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
                       systab_hdr->revision >> 16,
                       systab_hdr->revision & 0xffff,
                       min_major_version);

        return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
                                                size_t size)
{
        const efi_char16_t *ret;

        ret = early_memremap_ro(fw_vendor, size);
        if (!ret)
                pr_err("Could not map the firmware vendor!\n");
        return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
        early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)     __va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
                                     unsigned long fw_vendor)
{
        char vendor[100] = "unknown";
        const efi_char16_t *c16;
        size_t i;

        c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
        if (c16) {
                for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
                        vendor[i] = c16[i];
                vendor[i] = '\0';

                unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
        }

        pr_info("EFI v%u.%.02u by %s\n",
                systab_hdr->revision >> 16,
                systab_hdr->revision & 0xffff,
                vendor);
}

static __initdata char memory_type_name[][20] = {
        "Reserved",
        "Loader Code",
        "Loader Data",
        "Boot Code",
        "Boot Data",
        "Runtime Code",
        "Runtime Data",
        "Conventional Memory",
        "Unusable Memory",
        "ACPI Reclaim Memory",
        "ACPI Memory NVS",
        "Memory Mapped I/O",
        "MMIO Port Space",
        "PAL Code",
        "Persistent Memory",
};

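/*
 * Render @md's type and attribute bits into @buf for the EFI memory map
 * debug output: a padded type name (or a raw "[type=NN" for unknown types)
 * followed by one column per recognized EFI_MEMORY_* attribute flag.
 */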
char * __init efi_md_typeattr_format(char *buf, size_t size,
                                     const efi_memory_desc_t *md)
{
        char *pos;
        int type_len;
        u64 attr;

        pos = buf;
        if (md->type >= ARRAY_SIZE(memory_type_name))
                type_len = snprintf(pos, size, "[type=%u", md->type);
        else
                type_len = snprintf(pos, size, "[%-*s",
                                    (int)(sizeof(memory_type_name[0]) - 1),
                                    memory_type_name[md->type]);
        if (type_len >= size)
                return buf;

        pos += type_len;
        size -= type_len;

        attr = md->attribute;
        if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
                     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
                     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
                     EFI_MEMORY_NV | EFI_MEMORY_SP |
                     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
                snprintf(pos, size, "|attr=0x%016llx]",
                         (unsigned long long)attr);
        else
                snprintf(pos, size,
                         "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
                         attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
                         attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
                         attr & EFI_MEMORY_SP ? "SP" : "",
                         attr & EFI_MEMORY_NV ? "NV" : "",
                         attr & EFI_MEMORY_XP ? "XP" : "",
                         attr & EFI_MEMORY_RP ? "RP" : "",
                         attr & EFI_MEMORY_WP ? "WP" : "",
                         attr & EFI_MEMORY_RO ? "RO" : "",
                         attr & EFI_MEMORY_UCE ? "UCE" : "",
                         attr & EFI_MEMORY_WB ? "WB" : "",
                         attr & EFI_MEMORY_WT ? "WT" : "",
                         attr & EFI_MEMORY_WC ? "WC" : "",
                         attr & EFI_MEMORY_UC ? "UC" : "");
        return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return 0;

        for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                                  (md->num_pages << EFI_PAGE_SHIFT))))
                        return md->attribute;
        }
        return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
        const efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return -ENOTSUPP;

        for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                                  (md->num_pages << EFI_PAGE_SHIFT))))
                        return md->type;
        }
        return -EINVAL;
}
#endif

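/*
 * Translate an EFI_* status code into the closest negative errno value
 * (or 0 for EFI_SUCCESS); anything unrecognized falls back to -EINVAL.
 */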
int efi_status_to_err(efi_status_t status)
{
        int err;

        switch (status) {
        case EFI_SUCCESS:
                err = 0;
                break;
        case EFI_INVALID_PARAMETER:
                err = -EINVAL;
                break;
        case EFI_OUT_OF_RESOURCES:
                err = -ENOSPC;
                break;
        case EFI_DEVICE_ERROR:
                err = -EIO;
                break;
        case EFI_WRITE_PROTECTED:
                err = -EROFS;
                break;
        case EFI_SECURITY_VIOLATION:
                err = -EACCES;
                break;
        case EFI_NOT_FOUND:
                err = -ENOENT;
                break;
        case EFI_ABORTED:
                err = -EINTR;
                break;
        default:
                err = -EINVAL;
        }

        return err;
}

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
        if (mem_reserve == EFI_INVALID_TABLE_ADDR)
                return -ENODEV;

        efi_memreserve_root = memremap(mem_reserve,
                                       sizeof(*efi_memreserve_root),
                                       MEMREMAP_WB);
        if (WARN_ON_ONCE(!efi_memreserve_root))
                return -ENOMEM;
        return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
        struct resource *res, *parent;

        res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
        if (!res)
                return -ENOMEM;

        res->name = "reserved";
        res->flags = IORESOURCE_MEM;
        res->start = addr;
        res->end = addr + size - 1;

        /* we expect a conflict with a 'System RAM' region */
        parent = request_resource_conflict(&iomem_resource, res);
        return parent ? request_resource(parent, res) : 0;
}

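/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE list so it can be honoured
 * again when the table is reparsed (e.g. across kexec): claim a free slot in
 * an existing node if possible, otherwise allocate and link a new page-sized
 * node. The region is also marked "reserved" in the iomem resource tree.
 */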
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
        struct linux_efi_memreserve *rsv;
        unsigned long prsv;
        int rc, index;

        if (efi_memreserve_root == (void *)ULONG_MAX)
                return -ENODEV;

        if (!efi_memreserve_root) {
                rc = efi_memreserve_map_root();
                if (rc)
                        return rc;
        }

        /* first try to find a slot in an existing linked list entry */
        for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
                rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
                index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
                if (index < rsv->size) {
                        rsv->entry[index].base = addr;
                        rsv->entry[index].size = size;

                        memunmap(rsv);
                        return efi_mem_reserve_iomem(addr, size);
                }
                memunmap(rsv);
        }

        /* no slot found - allocate a new linked list entry */
        rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
        if (!rsv)
                return -ENOMEM;

        rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
        if (rc) {
                free_page((unsigned long)rsv);
                return rc;
        }

        /*
         * The memremap() call above assumes that a linux_efi_memreserve entry
         * never crosses a page boundary, so let's ensure that this remains true
         * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
         * using SZ_4K explicitly in the size calculation below.
         */
        rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
        atomic_set(&rsv->count, 1);
        rsv->entry[0].base = addr;
        rsv->entry[0].size = size;

        spin_lock(&efi_mem_reserve_persistent_lock);
        rsv->next = efi_memreserve_root->next;
        efi_memreserve_root->next = __pa(rsv);
        spin_unlock(&efi_mem_reserve_persistent_lock);

        return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
        if (efi_memreserve_root)
                return 0;
        if (efi_memreserve_map_root())
                efi_memreserve_root = (void *)ULONG_MAX;
        return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
                                  unsigned long code, void *unused)
{
        struct linux_efi_random_seed *seed;
        u32 size = 0;

        if (!kexec_in_progress)
                return NOTIFY_DONE;

        seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
        if (seed != NULL) {
                size = min(seed->size, EFI_RANDOM_SEED_SIZE);
                memunmap(seed);
        } else {
                pr_err("Could not map UEFI random seed!\n");
        }
        if (size > 0) {
                seed = memremap(efi_rng_seed, sizeof(*seed) + size,
                                MEMREMAP_WB);
                if (seed != NULL) {
                        seed->size = size;
                        get_random_bytes(seed->bits, seed->size);
                        memunmap(seed);
                } else {
                        pr_err("Could not map UEFI random seed!\n");
                }
        }
        return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
        .notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
        if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
                return 0;
        return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif