1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * efi.c - EFI subsystem 4 * 5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com> 6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com> 7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no> 8 * 9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported, 10 * allowing the efivarfs to be mounted or the efivars module to be loaded. 11 * The existance of /sys/firmware/efi may also be used by userspace to 12 * determine that the system supports EFI. 13 */ 14 15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 17 #include <linux/kobject.h> 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/debugfs.h> 21 #include <linux/device.h> 22 #include <linux/efi.h> 23 #include <linux/of.h> 24 #include <linux/io.h> 25 #include <linux/kexec.h> 26 #include <linux/platform_device.h> 27 #include <linux/random.h> 28 #include <linux/reboot.h> 29 #include <linux/slab.h> 30 #include <linux/acpi.h> 31 #include <linux/ucs2_string.h> 32 #include <linux/memblock.h> 33 #include <linux/security.h> 34 35 #include <asm/early_ioremap.h> 36 37 struct efi __read_mostly efi = { 38 .runtime_supported_mask = EFI_RT_SUPPORTED_ALL, 39 .acpi = EFI_INVALID_TABLE_ADDR, 40 .acpi20 = EFI_INVALID_TABLE_ADDR, 41 .smbios = EFI_INVALID_TABLE_ADDR, 42 .smbios3 = EFI_INVALID_TABLE_ADDR, 43 .esrt = EFI_INVALID_TABLE_ADDR, 44 .tpm_log = EFI_INVALID_TABLE_ADDR, 45 .tpm_final_log = EFI_INVALID_TABLE_ADDR, 46 }; 47 EXPORT_SYMBOL(efi); 48 49 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR; 50 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR; 51 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR; 52 53 struct mm_struct efi_mm = { 54 .mm_rb = RB_ROOT, 55 .mm_users = ATOMIC_INIT(2), 56 .mm_count = ATOMIC_INIT(1), 57 MMAP_LOCK_INITIALIZER(efi_mm) 58 .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), 59 .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), 60 
.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0}, 61 }; 62 63 struct workqueue_struct *efi_rts_wq; 64 65 static bool disable_runtime; 66 static int __init setup_noefi(char *arg) 67 { 68 disable_runtime = true; 69 return 0; 70 } 71 early_param("noefi", setup_noefi); 72 73 bool efi_runtime_disabled(void) 74 { 75 return disable_runtime; 76 } 77 78 bool __pure __efi_soft_reserve_enabled(void) 79 { 80 return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE); 81 } 82 83 static int __init parse_efi_cmdline(char *str) 84 { 85 if (!str) { 86 pr_warn("need at least one option\n"); 87 return -EINVAL; 88 } 89 90 if (parse_option_str(str, "debug")) 91 set_bit(EFI_DBG, &efi.flags); 92 93 if (parse_option_str(str, "noruntime")) 94 disable_runtime = true; 95 96 if (parse_option_str(str, "nosoftreserve")) 97 set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags); 98 99 return 0; 100 } 101 early_param("efi", parse_efi_cmdline); 102 103 struct kobject *efi_kobj; 104 105 /* 106 * Let's not leave out systab information that snuck into 107 * the efivars driver 108 * Note, do not add more fields in systab sysfs file as it breaks sysfs 109 * one value per file rule! 110 */ 111 static ssize_t systab_show(struct kobject *kobj, 112 struct kobj_attribute *attr, char *buf) 113 { 114 char *str = buf; 115 116 if (!kobj || !buf) 117 return -EINVAL; 118 119 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 120 str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20); 121 if (efi.acpi != EFI_INVALID_TABLE_ADDR) 122 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); 123 /* 124 * If both SMBIOS and SMBIOS3 entry points are implemented, the 125 * SMBIOS3 entry point shall be preferred, so we list it first to 126 * let applications stop parsing after the first match. 
127 */ 128 if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) 129 str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3); 130 if (efi.smbios != EFI_INVALID_TABLE_ADDR) 131 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); 132 133 if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86)) 134 str = efi_systab_show_arch(str); 135 136 return str - buf; 137 } 138 139 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400); 140 141 static ssize_t fw_platform_size_show(struct kobject *kobj, 142 struct kobj_attribute *attr, char *buf) 143 { 144 return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32); 145 } 146 147 extern __weak struct kobj_attribute efi_attr_fw_vendor; 148 extern __weak struct kobj_attribute efi_attr_runtime; 149 extern __weak struct kobj_attribute efi_attr_config_table; 150 static struct kobj_attribute efi_attr_fw_platform_size = 151 __ATTR_RO(fw_platform_size); 152 153 static struct attribute *efi_subsys_attrs[] = { 154 &efi_attr_systab.attr, 155 &efi_attr_fw_platform_size.attr, 156 &efi_attr_fw_vendor.attr, 157 &efi_attr_runtime.attr, 158 &efi_attr_config_table.attr, 159 NULL, 160 }; 161 162 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, 163 int n) 164 { 165 return attr->mode; 166 } 167 168 static const struct attribute_group efi_subsys_attr_group = { 169 .attrs = efi_subsys_attrs, 170 .is_visible = efi_attr_is_visible, 171 }; 172 173 static struct efivars generic_efivars; 174 static struct efivar_operations generic_ops; 175 176 static int generic_ops_register(void) 177 { 178 generic_ops.get_variable = efi.get_variable; 179 generic_ops.get_next_variable = efi.get_next_variable; 180 generic_ops.query_variable_store = efi_query_variable_store; 181 182 if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) { 183 generic_ops.set_variable = efi.set_variable; 184 generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking; 185 } 186 return efivars_register(&generic_efivars, &generic_ops, efi_kobj); 
187 } 188 189 static void generic_ops_unregister(void) 190 { 191 efivars_unregister(&generic_efivars); 192 } 193 194 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS 195 #define EFIVAR_SSDT_NAME_MAX 16 196 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; 197 static int __init efivar_ssdt_setup(char *str) 198 { 199 int ret = security_locked_down(LOCKDOWN_ACPI_TABLES); 200 201 if (ret) 202 return ret; 203 204 if (strlen(str) < sizeof(efivar_ssdt)) 205 memcpy(efivar_ssdt, str, strlen(str)); 206 else 207 pr_warn("efivar_ssdt: name too long: %s\n", str); 208 return 0; 209 } 210 __setup("efivar_ssdt=", efivar_ssdt_setup); 211 212 static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor, 213 unsigned long name_size, void *data) 214 { 215 struct efivar_entry *entry; 216 struct list_head *list = data; 217 char utf8_name[EFIVAR_SSDT_NAME_MAX]; 218 int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size); 219 220 ucs2_as_utf8(utf8_name, name, limit - 1); 221 if (strncmp(utf8_name, efivar_ssdt, limit) != 0) 222 return 0; 223 224 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 225 if (!entry) 226 return 0; 227 228 memcpy(entry->var.VariableName, name, name_size); 229 memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t)); 230 231 efivar_entry_add(entry, list); 232 233 return 0; 234 } 235 236 static __init int efivar_ssdt_load(void) 237 { 238 LIST_HEAD(entries); 239 struct efivar_entry *entry, *aux; 240 unsigned long size; 241 void *data; 242 int ret; 243 244 if (!efivar_ssdt[0]) 245 return 0; 246 247 ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries); 248 249 list_for_each_entry_safe(entry, aux, &entries, list) { 250 pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, 251 &entry->var.VendorGuid); 252 253 list_del(&entry->list); 254 255 ret = efivar_entry_size(entry, &size); 256 if (ret) { 257 pr_err("failed to get var size\n"); 258 goto free_entry; 259 } 260 261 data = kmalloc(size, GFP_KERNEL); 262 if (!data) { 263 ret = -ENOMEM; 
264 goto free_entry; 265 } 266 267 ret = efivar_entry_get(entry, NULL, &size, data); 268 if (ret) { 269 pr_err("failed to get var data\n"); 270 goto free_data; 271 } 272 273 ret = acpi_load_table(data, NULL); 274 if (ret) { 275 pr_err("failed to load table: %d\n", ret); 276 goto free_data; 277 } 278 279 goto free_entry; 280 281 free_data: 282 kfree(data); 283 284 free_entry: 285 kfree(entry); 286 } 287 288 return ret; 289 } 290 #else 291 static inline int efivar_ssdt_load(void) { return 0; } 292 #endif 293 294 #ifdef CONFIG_DEBUG_FS 295 296 #define EFI_DEBUGFS_MAX_BLOBS 32 297 298 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS]; 299 300 static void __init efi_debugfs_init(void) 301 { 302 struct dentry *efi_debugfs; 303 efi_memory_desc_t *md; 304 char name[32]; 305 int type_count[EFI_BOOT_SERVICES_DATA + 1] = {}; 306 int i = 0; 307 308 efi_debugfs = debugfs_create_dir("efi", NULL); 309 if (IS_ERR_OR_NULL(efi_debugfs)) 310 return; 311 312 for_each_efi_memory_desc(md) { 313 switch (md->type) { 314 case EFI_BOOT_SERVICES_CODE: 315 snprintf(name, sizeof(name), "boot_services_code%d", 316 type_count[md->type]++); 317 break; 318 case EFI_BOOT_SERVICES_DATA: 319 snprintf(name, sizeof(name), "boot_services_data%d", 320 type_count[md->type]++); 321 break; 322 default: 323 continue; 324 } 325 326 if (i >= EFI_DEBUGFS_MAX_BLOBS) { 327 pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n", 328 EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS); 329 break; 330 } 331 332 debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT; 333 debugfs_blob[i].data = memremap(md->phys_addr, 334 debugfs_blob[i].size, 335 MEMREMAP_WB); 336 if (!debugfs_blob[i].data) 337 continue; 338 339 debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]); 340 i++; 341 } 342 } 343 #else 344 static inline void efi_debugfs_init(void) {} 345 #endif 346 347 /* 348 * We register the efi subsystem with the firmware subsystem and the 349 * efivars subsystem 
with the efi subsystem, if the system was booted with 350 * EFI. 351 */ 352 static int __init efisubsys_init(void) 353 { 354 int error; 355 356 if (!efi_enabled(EFI_RUNTIME_SERVICES)) 357 efi.runtime_supported_mask = 0; 358 359 if (!efi_enabled(EFI_BOOT)) 360 return 0; 361 362 if (efi.runtime_supported_mask) { 363 /* 364 * Since we process only one efi_runtime_service() at a time, an 365 * ordered workqueue (which creates only one execution context) 366 * should suffice for all our needs. 367 */ 368 efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0); 369 if (!efi_rts_wq) { 370 pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n"); 371 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 372 efi.runtime_supported_mask = 0; 373 return 0; 374 } 375 } 376 377 if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES)) 378 platform_device_register_simple("rtc-efi", 0, NULL, 0); 379 380 /* We register the efi directory at /sys/firmware/efi */ 381 efi_kobj = kobject_create_and_add("efi", firmware_kobj); 382 if (!efi_kobj) { 383 pr_err("efi: Firmware registration failed.\n"); 384 destroy_workqueue(efi_rts_wq); 385 return -ENOMEM; 386 } 387 388 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | 389 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) { 390 efivar_ssdt_load(); 391 error = generic_ops_register(); 392 if (error) 393 goto err_put; 394 platform_device_register_simple("efivars", 0, NULL, 0); 395 } 396 397 error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group); 398 if (error) { 399 pr_err("efi: Sysfs attribute export failed with error %d.\n", 400 error); 401 goto err_unregister; 402 } 403 404 error = efi_runtime_map_init(efi_kobj); 405 if (error) 406 goto err_remove_group; 407 408 /* and the standard mountpoint for efivarfs */ 409 error = sysfs_create_mount_point(efi_kobj, "efivars"); 410 if (error) { 411 pr_err("efivars: Subsystem registration failed.\n"); 412 goto err_remove_group; 413 } 414 415 if (efi_enabled(EFI_DBG) && 
efi_enabled(EFI_PRESERVE_BS_REGIONS)) 416 efi_debugfs_init(); 417 418 return 0; 419 420 err_remove_group: 421 sysfs_remove_group(efi_kobj, &efi_subsys_attr_group); 422 err_unregister: 423 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | 424 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) 425 generic_ops_unregister(); 426 err_put: 427 kobject_put(efi_kobj); 428 destroy_workqueue(efi_rts_wq); 429 return error; 430 } 431 432 subsys_initcall(efisubsys_init); 433 434 /* 435 * Find the efi memory descriptor for a given physical address. Given a 436 * physical address, determine if it exists within an EFI Memory Map entry, 437 * and if so, populate the supplied memory descriptor with the appropriate 438 * data. 439 */ 440 int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) 441 { 442 efi_memory_desc_t *md; 443 444 if (!efi_enabled(EFI_MEMMAP)) { 445 pr_err_once("EFI_MEMMAP is not enabled.\n"); 446 return -EINVAL; 447 } 448 449 if (!out_md) { 450 pr_err_once("out_md is null.\n"); 451 return -EINVAL; 452 } 453 454 for_each_efi_memory_desc(md) { 455 u64 size; 456 u64 end; 457 458 size = md->num_pages << EFI_PAGE_SHIFT; 459 end = md->phys_addr + size; 460 if (phys_addr >= md->phys_addr && phys_addr < end) { 461 memcpy(out_md, md, sizeof(*out_md)); 462 return 0; 463 } 464 } 465 return -ENOENT; 466 } 467 468 /* 469 * Calculate the highest address of an efi memory descriptor. 470 */ 471 u64 __init efi_mem_desc_end(efi_memory_desc_t *md) 472 { 473 u64 size = md->num_pages << EFI_PAGE_SHIFT; 474 u64 end = md->phys_addr + size; 475 return end; 476 } 477 478 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {} 479 480 /** 481 * efi_mem_reserve - Reserve an EFI memory region 482 * @addr: Physical address to reserve 483 * @size: Size of reservation 484 * 485 * Mark a region as reserved from general kernel allocation and 486 * prevent it being released by efi_free_boot_services(). 
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}

/* GUID -> destination-pointer map for the common EFI configuration tables */
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys	},
#endif
	{},
};

/*
 * Look @guid up in @table_types; on a match store @table through the entry's
 * destination pointer and log the named tables. Returns 1 on match, 0 if
 * the GUID is not in the list.
 */
static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (!efi_guidcmp(*guid, table_types[i].guid)) {
			*(table_types[i].ptr) = table;
			if (table_types[i].name[0])
				pr_cont("%s=0x%lx ",
					table_types[i].name, table);
			return 1;
		}
	}

	return 0;
}

/*
 * Walk the firmware's configuration table array, recording the addresses of
 * known tables (common and arch-specific), then act on the Linux-specific
 * ones: RNG seed, MEMRESERVE list and the RT properties table.
 *
 * On x86 the entries must be re-read with explicit 32/64-bit layouts since
 * the firmware word size may differ from the kernel's.
 */
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			/* 32-bit kernels cannot address tables above 4GB */
			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		/* map the header first to learn the payload size */
		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = READ_ONCE(seed->size);
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				pr_notice("seeding entropy pool\n");
				add_bootloader_randomness(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		/* walk the singly linked list of memreserve pages */
		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		/* RT properties table narrows the supported service mask */
		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}

/*
 * Validate the EFI system table header: hard-fail on a bad signature, only
 * warn when the major revision is older than @min_major_version.
 */
int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
				   int min_major_version)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	if ((systab_hdr->revision >> 16) < min_major_version)
		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
		       systab_hdr->revision >> 16,
		       systab_hdr->revision & 0xffff,
		       min_major_version);

	return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
/* IA64 can access the vendor string directly through the linear map */
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

/*
 * Print the firmware revision and vendor. The UCS-2 vendor string is
 * narrowed to ASCII by truncating each character to one byte.
 */
void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,
		vendor);
}

/* human-readable names for the EFI memory descriptor types, in type order */
static __initdata char memory_type_name[][20] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional Memory",
	"Unusable Memory",
	"ACPI Reclaim Memory",
	"ACPI Memory NVS",
	"Memory Mapped I/O",
	"MMIO Port Space",
	"PAL Code",
	"Persistent Memory",
};

/*
 * Format the type and attribute bits of @md into @buf for boot-time memmap
 * dumps. Unknown attribute bits fall back to a raw hex rendering.
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
		    (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
818 */ 819 int efi_mem_type(unsigned long phys_addr) 820 { 821 const efi_memory_desc_t *md; 822 823 if (!efi_enabled(EFI_MEMMAP)) 824 return -ENOTSUPP; 825 826 for_each_efi_memory_desc(md) { 827 if ((md->phys_addr <= phys_addr) && 828 (phys_addr < (md->phys_addr + 829 (md->num_pages << EFI_PAGE_SHIFT)))) 830 return md->type; 831 } 832 return -EINVAL; 833 } 834 #endif 835 836 int efi_status_to_err(efi_status_t status) 837 { 838 int err; 839 840 switch (status) { 841 case EFI_SUCCESS: 842 err = 0; 843 break; 844 case EFI_INVALID_PARAMETER: 845 err = -EINVAL; 846 break; 847 case EFI_OUT_OF_RESOURCES: 848 err = -ENOSPC; 849 break; 850 case EFI_DEVICE_ERROR: 851 err = -EIO; 852 break; 853 case EFI_WRITE_PROTECTED: 854 err = -EROFS; 855 break; 856 case EFI_SECURITY_VIOLATION: 857 err = -EACCES; 858 break; 859 case EFI_NOT_FOUND: 860 err = -ENOENT; 861 break; 862 case EFI_ABORTED: 863 err = -EINTR; 864 break; 865 default: 866 err = -EINVAL; 867 } 868 869 return err; 870 } 871 872 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); 873 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; 874 875 static int __init efi_memreserve_map_root(void) 876 { 877 if (mem_reserve == EFI_INVALID_TABLE_ADDR) 878 return -ENODEV; 879 880 efi_memreserve_root = memremap(mem_reserve, 881 sizeof(*efi_memreserve_root), 882 MEMREMAP_WB); 883 if (WARN_ON_ONCE(!efi_memreserve_root)) 884 return -ENOMEM; 885 return 0; 886 } 887 888 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) 889 { 890 struct resource *res, *parent; 891 892 res = kzalloc(sizeof(struct resource), GFP_ATOMIC); 893 if (!res) 894 return -ENOMEM; 895 896 res->name = "reserved"; 897 res->flags = IORESOURCE_MEM; 898 res->start = addr; 899 res->end = addr + size - 1; 900 901 /* we expect a conflict with a 'System RAM' region */ 902 parent = request_resource_conflict(&iomem_resource, res); 903 return parent ? 
request_resource(parent, res) : 0; 904 } 905 906 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) 907 { 908 struct linux_efi_memreserve *rsv; 909 unsigned long prsv; 910 int rc, index; 911 912 if (efi_memreserve_root == (void *)ULONG_MAX) 913 return -ENODEV; 914 915 if (!efi_memreserve_root) { 916 rc = efi_memreserve_map_root(); 917 if (rc) 918 return rc; 919 } 920 921 /* first try to find a slot in an existing linked list entry */ 922 for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { 923 rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); 924 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); 925 if (index < rsv->size) { 926 rsv->entry[index].base = addr; 927 rsv->entry[index].size = size; 928 929 memunmap(rsv); 930 return efi_mem_reserve_iomem(addr, size); 931 } 932 memunmap(rsv); 933 } 934 935 /* no slot found - allocate a new linked list entry */ 936 rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC); 937 if (!rsv) 938 return -ENOMEM; 939 940 rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); 941 if (rc) { 942 free_page((unsigned long)rsv); 943 return rc; 944 } 945 946 /* 947 * The memremap() call above assumes that a linux_efi_memreserve entry 948 * never crosses a page boundary, so let's ensure that this remains true 949 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by 950 * using SZ_4K explicitly in the size calculation below. 
951 */ 952 rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K); 953 atomic_set(&rsv->count, 1); 954 rsv->entry[0].base = addr; 955 rsv->entry[0].size = size; 956 957 spin_lock(&efi_mem_reserve_persistent_lock); 958 rsv->next = efi_memreserve_root->next; 959 efi_memreserve_root->next = __pa(rsv); 960 spin_unlock(&efi_mem_reserve_persistent_lock); 961 962 return efi_mem_reserve_iomem(addr, size); 963 } 964 965 static int __init efi_memreserve_root_init(void) 966 { 967 if (efi_memreserve_root) 968 return 0; 969 if (efi_memreserve_map_root()) 970 efi_memreserve_root = (void *)ULONG_MAX; 971 return 0; 972 } 973 early_initcall(efi_memreserve_root_init); 974 975 #ifdef CONFIG_KEXEC 976 static int update_efi_random_seed(struct notifier_block *nb, 977 unsigned long code, void *unused) 978 { 979 struct linux_efi_random_seed *seed; 980 u32 size = 0; 981 982 if (!kexec_in_progress) 983 return NOTIFY_DONE; 984 985 seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB); 986 if (seed != NULL) { 987 size = min(seed->size, EFI_RANDOM_SEED_SIZE); 988 memunmap(seed); 989 } else { 990 pr_err("Could not map UEFI random seed!\n"); 991 } 992 if (size > 0) { 993 seed = memremap(efi_rng_seed, sizeof(*seed) + size, 994 MEMREMAP_WB); 995 if (seed != NULL) { 996 seed->size = size; 997 get_random_bytes(seed->bits, seed->size); 998 memunmap(seed); 999 } else { 1000 pr_err("Could not map UEFI random seed!\n"); 1001 } 1002 } 1003 return NOTIFY_DONE; 1004 } 1005 1006 static struct notifier_block efi_random_seed_nb = { 1007 .notifier_call = update_efi_random_seed, 1008 }; 1009 1010 static int __init register_update_efi_random_seed(void) 1011 { 1012 if (efi_rng_seed == EFI_INVALID_TABLE_ADDR) 1013 return 0; 1014 return register_reboot_notifier(&efi_random_seed_nb); 1015 } 1016 late_initcall(register_update_efi_random_seed); 1017 #endif 1018