xref: /openbmc/linux/drivers/firmware/efi/efi.c (revision 0661cb2a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/io.h>
25 #include <linux/kexec.h>
26 #include <linux/platform_device.h>
27 #include <linux/random.h>
28 #include <linux/reboot.h>
29 #include <linux/slab.h>
30 #include <linux/acpi.h>
31 #include <linux/ucs2_string.h>
32 #include <linux/memblock.h>
33 #include <linux/security.h>
34 
35 #include <asm/early_ioremap.h>
36 
37 struct efi __read_mostly efi = {
38 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
39 	.acpi			= EFI_INVALID_TABLE_ADDR,
40 	.acpi20			= EFI_INVALID_TABLE_ADDR,
41 	.smbios			= EFI_INVALID_TABLE_ADDR,
42 	.smbios3		= EFI_INVALID_TABLE_ADDR,
43 	.esrt			= EFI_INVALID_TABLE_ADDR,
44 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
45 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
46 #ifdef CONFIG_LOAD_UEFI_KEYS
47 	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
48 #endif
49 };
50 EXPORT_SYMBOL(efi);
51 
52 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
53 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
54 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
55 
56 struct mm_struct efi_mm = {
57 	.mm_rb			= RB_ROOT,
58 	.mm_users		= ATOMIC_INIT(2),
59 	.mm_count		= ATOMIC_INIT(1),
60 	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
61 	MMAP_LOCK_INITIALIZER(efi_mm)
62 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
63 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
64 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
65 };
66 
67 struct workqueue_struct *efi_rts_wq;
68 
69 static bool disable_runtime;
70 static int __init setup_noefi(char *arg)
71 {
72 	disable_runtime = true;
73 	return 0;
74 }
75 early_param("noefi", setup_noefi);
76 
77 bool efi_runtime_disabled(void)
78 {
79 	return disable_runtime;
80 }
81 
82 bool __pure __efi_soft_reserve_enabled(void)
83 {
84 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
85 }
86 
87 static int __init parse_efi_cmdline(char *str)
88 {
89 	if (!str) {
90 		pr_warn("need at least one option\n");
91 		return -EINVAL;
92 	}
93 
94 	if (parse_option_str(str, "debug"))
95 		set_bit(EFI_DBG, &efi.flags);
96 
97 	if (parse_option_str(str, "noruntime"))
98 		disable_runtime = true;
99 
100 	if (parse_option_str(str, "nosoftreserve"))
101 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
102 
103 	return 0;
104 }
105 early_param("efi", parse_efi_cmdline);
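
/*
 * Illustrative usage, not part of the driver: the options handled above are
 * passed via the "efi=" kernel command line parameter, while "noefi" is the
 * separate parameter handled by setup_noefi(). The values below are only
 * examples:
 *
 *	efi=debug,nosoftreserve   enable EFI_DBG, ignore soft-reserved memory
 *	efi=noruntime             keep EFI runtime services disabled
 *	noefi                     same effect as efi=noruntime in this file
 */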
106 
107 struct kobject *efi_kobj;
108 
109 /*
110  * Let's not leave out the systab information that snuck into
111  * the efivars driver.
112  * Note: do not add more fields to the systab sysfs file, as that breaks
113  * the sysfs one-value-per-file rule!
114  */
115 static ssize_t systab_show(struct kobject *kobj,
116 			   struct kobj_attribute *attr, char *buf)
117 {
118 	char *str = buf;
119 
120 	if (!kobj || !buf)
121 		return -EINVAL;
122 
123 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
124 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
125 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
126 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
127 	/*
128 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
129 	 * SMBIOS3 entry point shall be preferred, so we list it first to
130 	 * let applications stop parsing after the first match.
131 	 */
132 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
133 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
134 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
135 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
136 
137 	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
138 		str = efi_systab_show_arch(str);
139 
140 	return str - buf;
141 }
142 
143 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
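
/*
 * Illustrative output only (addresses are made up): reading the resulting
 * /sys/firmware/efi/systab file typically looks like
 *
 *	ACPI20=0x7ff7e014
 *	ACPI=0x7ff7e000
 *	SMBIOS3=0x7ff7d000
 *	SMBIOS=0x7ff7c000
 *
 * plus whatever architecture-specific entries efi_systab_show_arch() appends.
 */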
144 
145 static ssize_t fw_platform_size_show(struct kobject *kobj,
146 				     struct kobj_attribute *attr, char *buf)
147 {
148 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
149 }
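
/*
 * Illustrative only: on 64-bit firmware this attribute reads back as
 *
 *	$ cat /sys/firmware/efi/fw_platform_size
 *	64
 */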
150 
151 extern __weak struct kobj_attribute efi_attr_fw_vendor;
152 extern __weak struct kobj_attribute efi_attr_runtime;
153 extern __weak struct kobj_attribute efi_attr_config_table;
154 static struct kobj_attribute efi_attr_fw_platform_size =
155 	__ATTR_RO(fw_platform_size);
156 
157 static struct attribute *efi_subsys_attrs[] = {
158 	&efi_attr_systab.attr,
159 	&efi_attr_fw_platform_size.attr,
160 	&efi_attr_fw_vendor.attr,
161 	&efi_attr_runtime.attr,
162 	&efi_attr_config_table.attr,
163 	NULL,
164 };
165 
166 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
167 				   int n)
168 {
169 	return attr->mode;
170 }
171 
172 static const struct attribute_group efi_subsys_attr_group = {
173 	.attrs = efi_subsys_attrs,
174 	.is_visible = efi_attr_is_visible,
175 };
176 
177 static struct efivars generic_efivars;
178 static struct efivar_operations generic_ops;
179 
180 static int generic_ops_register(void)
181 {
182 	generic_ops.get_variable = efi.get_variable;
183 	generic_ops.get_next_variable = efi.get_next_variable;
184 	generic_ops.query_variable_store = efi_query_variable_store;
185 
186 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
187 		generic_ops.set_variable = efi.set_variable;
188 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
189 	}
190 	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
191 }
192 
193 static void generic_ops_unregister(void)
194 {
195 	efivars_unregister(&generic_efivars);
196 }
197 
198 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
199 #define EFIVAR_SSDT_NAME_MAX	16
200 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
201 static int __init efivar_ssdt_setup(char *str)
202 {
203 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
204 
205 	if (ret)
206 		return ret;
207 
208 	if (strlen(str) < sizeof(efivar_ssdt))
209 		memcpy(efivar_ssdt, str, strlen(str));
210 	else
211 		pr_warn("efivar_ssdt: name too long: %s\n", str);
212 	return 0;
213 }
214 __setup("efivar_ssdt=", efivar_ssdt_setup);
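
/*
 * Illustrative boot parameter usage (the variable name is just an example);
 * the name must be shorter than EFIVAR_SSDT_NAME_MAX characters:
 *
 *	efivar_ssdt=SSDTOVERLAY
 */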
215 
216 static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
217 				   unsigned long name_size, void *data)
218 {
219 	struct efivar_entry *entry;
220 	struct list_head *list = data;
221 	char utf8_name[EFIVAR_SSDT_NAME_MAX];
222 	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
223 
224 	ucs2_as_utf8(utf8_name, name, limit - 1);
225 	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
226 		return 0;
227 
228 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
229 	if (!entry)
230 		return 0;
231 
232 	memcpy(entry->var.VariableName, name, name_size);
233 	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
234 
235 	efivar_entry_add(entry, list);
236 
237 	return 0;
238 }
239 
240 static __init int efivar_ssdt_load(void)
241 {
242 	LIST_HEAD(entries);
243 	struct efivar_entry *entry, *aux;
244 	unsigned long size;
245 	void *data;
246 	int ret;
247 
248 	if (!efivar_ssdt[0])
249 		return 0;
250 
251 	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
252 
253 	list_for_each_entry_safe(entry, aux, &entries, list) {
254 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
255 			&entry->var.VendorGuid);
256 
257 		list_del(&entry->list);
258 
259 		ret = efivar_entry_size(entry, &size);
260 		if (ret) {
261 			pr_err("failed to get var size\n");
262 			goto free_entry;
263 		}
264 
265 		data = kmalloc(size, GFP_KERNEL);
266 		if (!data) {
267 			ret = -ENOMEM;
268 			goto free_entry;
269 		}
270 
271 		ret = efivar_entry_get(entry, NULL, &size, data);
272 		if (ret) {
273 			pr_err("failed to get var data\n");
274 			goto free_data;
275 		}
276 
277 		ret = acpi_load_table(data, NULL);
278 		if (ret) {
279 			pr_err("failed to load table: %d\n", ret);
280 			goto free_data;
281 		}
282 
283 		goto free_entry;
284 
285 free_data:
286 		kfree(data);
287 
288 free_entry:
289 		kfree(entry);
290 	}
291 
292 	return ret;
293 }
294 #else
295 static inline int efivar_ssdt_load(void) { return 0; }
296 #endif
297 
298 #ifdef CONFIG_DEBUG_FS
299 
300 #define EFI_DEBUGFS_MAX_BLOBS 32
301 
302 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
303 
304 static void __init efi_debugfs_init(void)
305 {
306 	struct dentry *efi_debugfs;
307 	efi_memory_desc_t *md;
308 	char name[32];
309 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
310 	int i = 0;
311 
312 	efi_debugfs = debugfs_create_dir("efi", NULL);
313 	if (IS_ERR_OR_NULL(efi_debugfs))
314 		return;
315 
316 	for_each_efi_memory_desc(md) {
317 		switch (md->type) {
318 		case EFI_BOOT_SERVICES_CODE:
319 			snprintf(name, sizeof(name), "boot_services_code%d",
320 				 type_count[md->type]++);
321 			break;
322 		case EFI_BOOT_SERVICES_DATA:
323 			snprintf(name, sizeof(name), "boot_services_data%d",
324 				 type_count[md->type]++);
325 			break;
326 		default:
327 			continue;
328 		}
329 
330 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
331 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
332 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
333 			break;
334 		}
335 
336 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
337 		debugfs_blob[i].data = memremap(md->phys_addr,
338 						debugfs_blob[i].size,
339 						MEMREMAP_WB);
340 		if (!debugfs_blob[i].data)
341 			continue;
342 
343 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
344 		i++;
345 	}
346 }
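
/*
 * Illustrative only: with debugfs mounted in its usual location, the loop
 * above exposes one read-only blob per preserved boot services region, e.g.
 *
 *	/sys/kernel/debug/efi/boot_services_code0
 *	/sys/kernel/debug/efi/boot_services_data0
 *	/sys/kernel/debug/efi/boot_services_data1
 *
 * capped at EFI_DEBUGFS_MAX_BLOBS entries.
 */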
347 #else
348 static inline void efi_debugfs_init(void) {}
349 #endif
350 
351 /*
352  * We register the efi subsystem with the firmware subsystem and the
353  * efivars subsystem with the efi subsystem, if the system was booted with
354  * EFI.
355  */
356 static int __init efisubsys_init(void)
357 {
358 	int error;
359 
360 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
361 		efi.runtime_supported_mask = 0;
362 
363 	if (!efi_enabled(EFI_BOOT))
364 		return 0;
365 
366 	if (efi.runtime_supported_mask) {
367 		/*
368 		 * Since we process only one efi_runtime_service() at a time, an
369 		 * ordered workqueue (which creates only one execution context)
370 		 * should suffice for all our needs.
371 		 */
372 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
373 		if (!efi_rts_wq) {
374 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
375 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
376 			efi.runtime_supported_mask = 0;
377 			return 0;
378 		}
379 	}
380 
381 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
382 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
383 
384 	/* We register the efi directory at /sys/firmware/efi */
385 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
386 	if (!efi_kobj) {
387 		pr_err("Firmware registration failed.\n");
388 		destroy_workqueue(efi_rts_wq);
389 		return -ENOMEM;
390 	}
391 
392 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
393 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
394 		error = generic_ops_register();
395 		if (error)
396 			goto err_put;
397 		efivar_ssdt_load();
398 		platform_device_register_simple("efivars", 0, NULL, 0);
399 	}
400 
401 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
402 	if (error) {
403 		pr_err("Sysfs attribute export failed with error %d.\n",
404 		       error);
405 		goto err_unregister;
406 	}
407 
408 	error = efi_runtime_map_init(efi_kobj);
409 	if (error)
410 		goto err_remove_group;
411 
412 	/* and the standard mountpoint for efivarfs */
413 	error = sysfs_create_mount_point(efi_kobj, "efivars");
414 	if (error) {
415 		pr_err("efivars: Subsystem registration failed.\n");
416 		goto err_remove_group;
417 	}
418 
419 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
420 		efi_debugfs_init();
421 
422 	return 0;
423 
424 err_remove_group:
425 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
426 err_unregister:
427 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
428 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
429 		generic_ops_unregister();
430 err_put:
431 	kobject_put(efi_kobj);
432 	destroy_workqueue(efi_rts_wq);
433 	return error;
434 }
435 
436 subsys_initcall(efisubsys_init);
437 
438 /*
439  * Find the efi memory descriptor for a given physical address.  Given a
440  * physical address, determine if it exists within an EFI Memory Map entry,
441  * and if so, populate the supplied memory descriptor with the appropriate
442  * data.
443  */
444 int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
445 {
446 	efi_memory_desc_t *md;
447 
448 	if (!efi_enabled(EFI_MEMMAP)) {
449 		pr_err_once("EFI_MEMMAP is not enabled.\n");
450 		return -EINVAL;
451 	}
452 
453 	if (!out_md) {
454 		pr_err_once("out_md is null.\n");
455 		return -EINVAL;
456 	}
457 
458 	for_each_efi_memory_desc(md) {
459 		u64 size;
460 		u64 end;
461 
462 		size = md->num_pages << EFI_PAGE_SHIFT;
463 		end = md->phys_addr + size;
464 		if (phys_addr >= md->phys_addr && phys_addr < end) {
465 			memcpy(out_md, md, sizeof(*out_md));
466 			return 0;
467 		}
468 	}
469 	return -ENOENT;
470 }
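
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	efi_memory_desc_t md;
 *
 *	if (!efi_mem_desc_lookup(phys_addr, &md))
 *		pr_debug("descriptor for 0x%llx: type %u, %llu pages\n",
 *			 phys_addr, md.type, md.num_pages);
 */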
471 
472 /*
473  * Calculate the highest address of an efi memory descriptor.
474  */
475 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
476 {
477 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
478 	u64 end = md->phys_addr + size;
479 	return end;
480 }
481 
482 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
483 
484 /**
485  * efi_mem_reserve - Reserve an EFI memory region
486  * @addr: Physical address to reserve
487  * @size: Size of reservation
488  *
489  * Mark a region as reserved from general kernel allocation and
490  * prevent it being released by efi_free_boot_services().
491  *
492  * This function should be called by drivers once they've parsed EFI
493  * configuration tables to figure out where their data lives, e.g.
494  * efi_esrt_init().
495  */
496 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
497 {
498 	if (!memblock_is_region_reserved(addr, size))
499 		memblock_reserve(addr, size);
500 
501 	/*
502 	 * Some architectures (x86) reserve all boot services ranges
503 	 * until efi_free_boot_services() because of buggy firmware
504 	 * implementations. This means the above memblock_reserve() is
505 	 * superfluous on x86, and what this call actually needs to do is
506 	 * ensure that the region described by @addr and @size is not freed.
507 	 */
508 	efi_arch_mem_reserve(addr, size);
509 }
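
/*
 * Usage sketch (hypothetical values): a driver that found its configuration
 * table in boot services memory, e.g. via efi_mem_desc_lookup(), would pin
 * the range with
 *
 *	efi_mem_reserve(table_phys_addr, table_size);
 *
 * so that efi_free_boot_services() does not hand it back to the page
 * allocator.
 */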
510 
511 static const efi_config_table_type_t common_tables[] __initconst = {
512 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
513 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
514 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
515 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
516 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
517 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
518 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
519 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
520 	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
521 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
522 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
523 #ifdef CONFIG_EFI_RCI2_TABLE
524 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
525 #endif
526 #ifdef CONFIG_LOAD_UEFI_KEYS
527 	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
528 #endif
529 	{},
530 };
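
/*
 * Illustrative sketch: architecture code hands efi_config_parse_tables() an
 * additional table list in exactly this format, terminated by an empty
 * entry. The GUID, pointer and name below are placeholders, not real tables:
 *
 *	static const efi_config_table_type_t arch_tables[] __initconst = {
 *		{SOME_VENDOR_TABLE_GUID, &some_vendor_table_phys, "VENDORTBL"},
 *		{},
 *	};
 */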
531 
532 static __init int match_config_table(const efi_guid_t *guid,
533 				     unsigned long table,
534 				     const efi_config_table_type_t *table_types)
535 {
536 	int i;
537 
538 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
539 		if (!efi_guidcmp(*guid, table_types[i].guid)) {
540 			*(table_types[i].ptr) = table;
541 			if (table_types[i].name[0])
542 				pr_cont("%s=0x%lx ",
543 					table_types[i].name, table);
544 			return 1;
545 		}
546 	}
547 
548 	return 0;
549 }
550 
551 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
552 				   int count,
553 				   const efi_config_table_type_t *arch_tables)
554 {
555 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
556 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
557 	const efi_guid_t *guid;
558 	unsigned long table;
559 	int i;
560 
561 	pr_info("");
562 	for (i = 0; i < count; i++) {
563 		if (!IS_ENABLED(CONFIG_X86)) {
564 			guid = &config_tables[i].guid;
565 			table = (unsigned long)config_tables[i].table;
566 		} else if (efi_enabled(EFI_64BIT)) {
567 			guid = &tbl64[i].guid;
568 			table = tbl64[i].table;
569 
570 			if (IS_ENABLED(CONFIG_X86_32) &&
571 			    tbl64[i].table > U32_MAX) {
572 				pr_cont("\n");
573 				pr_err("Table located above 4GB, disabling EFI.\n");
574 				return -EINVAL;
575 			}
576 		} else {
577 			guid = &tbl32[i].guid;
578 			table = tbl32[i].table;
579 		}
580 
581 		if (!match_config_table(guid, table, common_tables) && arch_tables)
582 			match_config_table(guid, table, arch_tables);
583 	}
584 	pr_cont("\n");
585 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
586 
587 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
588 		struct linux_efi_random_seed *seed;
589 		u32 size = 0;
590 
591 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
592 		if (seed != NULL) {
593 			size = READ_ONCE(seed->size);
594 			early_memunmap(seed, sizeof(*seed));
595 		} else {
596 			pr_err("Could not map UEFI random seed!\n");
597 		}
598 		if (size > 0) {
599 			seed = early_memremap(efi_rng_seed,
600 					      sizeof(*seed) + size);
601 			if (seed != NULL) {
602 				pr_notice("seeding entropy pool\n");
603 				add_bootloader_randomness(seed->bits, size);
604 				early_memunmap(seed, sizeof(*seed) + size);
605 			} else {
606 				pr_err("Could not map UEFI random seed!\n");
607 			}
608 		}
609 	}
610 
611 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
612 		efi_memattr_init();
613 
614 	efi_tpm_eventlog_init();
615 
616 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
617 		unsigned long prsv = mem_reserve;
618 
619 		while (prsv) {
620 			struct linux_efi_memreserve *rsv;
621 			u8 *p;
622 
623 			/*
624 			 * Just map a full page: that is what we will get
625 			 * anyway, and it permits us to map the entire entry
626 			 * before knowing its size.
627 			 */
628 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
629 					   PAGE_SIZE);
630 			if (p == NULL) {
631 				pr_err("Could not map UEFI memreserve entry!\n");
632 				return -ENOMEM;
633 			}
634 
635 			rsv = (void *)(p + prsv % PAGE_SIZE);
636 
637 			/* reserve the entry itself */
638 			memblock_reserve(prsv,
639 					 struct_size(rsv, entry, rsv->size));
640 
641 			for (i = 0; i < atomic_read(&rsv->count); i++) {
642 				memblock_reserve(rsv->entry[i].base,
643 						 rsv->entry[i].size);
644 			}
645 
646 			prsv = rsv->next;
647 			early_memunmap(p, PAGE_SIZE);
648 		}
649 	}
650 
651 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
652 		efi_rt_properties_table_t *tbl;
653 
654 		tbl = early_memremap(rt_prop, sizeof(*tbl));
655 		if (tbl) {
656 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
657 			early_memunmap(tbl, sizeof(*tbl));
658 		}
659 	}
660 
661 	return 0;
662 }
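
/*
 * Illustrative only: the pr_info("")/pr_cont() sequence above condenses all
 * recognized tables into a single boot log line, along the lines of
 * (addresses and ordering made up):
 *
 *	efi: ACPI=0x7ff7c000 SMBIOS=0x7ff7a000 ESRT=0x7ff79000 MEMATTR=0x7ff78018 RNG=0x7ff77018
 */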
663 
664 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
665 				   int min_major_version)
666 {
667 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
668 		pr_err("System table signature incorrect!\n");
669 		return -EINVAL;
670 	}
671 
672 	if ((systab_hdr->revision >> 16) < min_major_version)
673 		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
674 		       systab_hdr->revision >> 16,
675 		       systab_hdr->revision & 0xffff,
676 		       min_major_version);
677 
678 	return 0;
679 }
680 
681 #ifndef CONFIG_IA64
682 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
683 						size_t size)
684 {
685 	const efi_char16_t *ret;
686 
687 	ret = early_memremap_ro(fw_vendor, size);
688 	if (!ret)
689 		pr_err("Could not map the firmware vendor!\n");
690 	return ret;
691 }
692 
693 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
694 {
695 	early_memunmap((void *)fw_vendor, size);
696 }
697 #else
698 #define map_fw_vendor(p, s)	__va(p)
699 #define unmap_fw_vendor(v, s)
700 #endif
701 
702 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
703 				     unsigned long fw_vendor)
704 {
705 	char vendor[100] = "unknown";
706 	const efi_char16_t *c16;
707 	size_t i;
708 
709 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
710 	if (c16) {
711 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
712 			vendor[i] = c16[i];
713 		vendor[i] = '\0';
714 
715 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
716 	}
717 
718 	pr_info("EFI v%u.%.02u by %s\n",
719 		systab_hdr->revision >> 16,
720 		systab_hdr->revision & 0xffff,
721 		vendor);
722 }
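
/*
 * Illustrative only (vendor and revision are examples): the banner printed
 * above comes out as
 *
 *	efi: EFI v2.70 by American Megatrends
 */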
723 
724 static __initdata char memory_type_name[][13] = {
725 	"Reserved",
726 	"Loader Code",
727 	"Loader Data",
728 	"Boot Code",
729 	"Boot Data",
730 	"Runtime Code",
731 	"Runtime Data",
732 	"Conventional",
733 	"Unusable",
734 	"ACPI Reclaim",
735 	"ACPI Mem NVS",
736 	"MMIO",
737 	"MMIO Port",
738 	"PAL Code",
739 	"Persistent",
740 };
741 
742 char * __init efi_md_typeattr_format(char *buf, size_t size,
743 				     const efi_memory_desc_t *md)
744 {
745 	char *pos;
746 	int type_len;
747 	u64 attr;
748 
749 	pos = buf;
750 	if (md->type >= ARRAY_SIZE(memory_type_name))
751 		type_len = snprintf(pos, size, "[type=%u", md->type);
752 	else
753 		type_len = snprintf(pos, size, "[%-*s",
754 				    (int)(sizeof(memory_type_name[0]) - 1),
755 				    memory_type_name[md->type]);
756 	if (type_len >= size)
757 		return buf;
758 
759 	pos += type_len;
760 	size -= type_len;
761 
762 	attr = md->attribute;
763 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
764 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
765 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
766 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
767 		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
768 		snprintf(pos, size, "|attr=0x%016llx]",
769 			 (unsigned long long)attr);
770 	else
771 		snprintf(pos, size,
772 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
773 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
774 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
775 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
776 			 attr & EFI_MEMORY_SP			? "SP"  : "",
777 			 attr & EFI_MEMORY_NV			? "NV"  : "",
778 			 attr & EFI_MEMORY_XP			? "XP"  : "",
779 			 attr & EFI_MEMORY_RP			? "RP"  : "",
780 			 attr & EFI_MEMORY_WP			? "WP"  : "",
781 			 attr & EFI_MEMORY_RO			? "RO"  : "",
782 			 attr & EFI_MEMORY_UCE			? "UCE" : "",
783 			 attr & EFI_MEMORY_WB			? "WB"  : "",
784 			 attr & EFI_MEMORY_WT			? "WT"  : "",
785 			 attr & EFI_MEMORY_WC			? "WC"  : "",
786 			 attr & EFI_MEMORY_UC			? "UC"  : "");
787 	return buf;
788 }
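
/*
 * Illustrative only: for a runtime data descriptor with EFI_MEMORY_RUNTIME
 * and the UC|WC|WT|WB caching attributes set, the formatter above produces
 * something along the lines of
 *
 *	[Runtime Data|RUN|  |  |  |  |  |  |  |  |   |WB|WT|WC|UC]
 *
 * i.e. the type name padded to a fixed width, followed by one column per
 * known attribute, left blank when the attribute is clear.
 */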
789 
790 /*
791  * IA64 has a funky EFI memory map that doesn't work the same way as
792  * other architectures.
793  */
794 #ifndef CONFIG_IA64
795 /*
796  * efi_mem_attributes - lookup memmap attributes for physical address
797  * @phys_addr: the physical address to lookup
798  *
799  * Search in the EFI memory map for the region covering
800  * @phys_addr. Returns the EFI memory attributes if the region
801  * was found in the memory map, 0 otherwise.
802  */
803 u64 efi_mem_attributes(unsigned long phys_addr)
804 {
805 	efi_memory_desc_t *md;
806 
807 	if (!efi_enabled(EFI_MEMMAP))
808 		return 0;
809 
810 	for_each_efi_memory_desc(md) {
811 		if ((md->phys_addr <= phys_addr) &&
812 		    (phys_addr < (md->phys_addr +
813 		    (md->num_pages << EFI_PAGE_SHIFT))))
814 			return md->attribute;
815 	}
816 	return 0;
817 }
818 
819 /*
820  * efi_mem_type - lookup memmap type for physical address
821  * @phys_addr: the physical address to lookup
822  *
823  * Search in the EFI memory map for the region covering @phys_addr.
824  * Returns the EFI memory type if the region was found in the memory
825  * map, -EINVAL otherwise.
826  */
827 int efi_mem_type(unsigned long phys_addr)
828 {
829 	const efi_memory_desc_t *md;
830 
831 	if (!efi_enabled(EFI_MEMMAP))
832 		return -ENOTSUPP;
833 
834 	for_each_efi_memory_desc(md) {
835 		if ((md->phys_addr <= phys_addr) &&
836 		    (phys_addr < (md->phys_addr +
837 				  (md->num_pages << EFI_PAGE_SHIFT))))
838 			return md->type;
839 	}
840 	return -EINVAL;
841 }
842 #endif
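
/*
 * Minimal usage sketch (hypothetical caller; 'paddr' is an example value):
 *
 *	if (efi_mem_type(paddr) == EFI_CONVENTIONAL_MEMORY &&
 *	    (efi_mem_attributes(paddr) & EFI_MEMORY_WB))
 *		pr_debug("0x%lx is cacheable conventional memory\n", paddr);
 */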
843 
844 int efi_status_to_err(efi_status_t status)
845 {
846 	int err;
847 
848 	switch (status) {
849 	case EFI_SUCCESS:
850 		err = 0;
851 		break;
852 	case EFI_INVALID_PARAMETER:
853 		err = -EINVAL;
854 		break;
855 	case EFI_OUT_OF_RESOURCES:
856 		err = -ENOSPC;
857 		break;
858 	case EFI_DEVICE_ERROR:
859 		err = -EIO;
860 		break;
861 	case EFI_WRITE_PROTECTED:
862 		err = -EROFS;
863 		break;
864 	case EFI_SECURITY_VIOLATION:
865 		err = -EACCES;
866 		break;
867 	case EFI_NOT_FOUND:
868 		err = -ENOENT;
869 		break;
870 	case EFI_ABORTED:
871 		err = -EINTR;
872 		break;
873 	default:
874 		err = -EINVAL;
875 	}
876 
877 	return err;
878 }
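
/*
 * Minimal usage sketch (hypothetical caller; name, vendor, size and data are
 * assumed to be set up elsewhere): the usual pattern is to convert the EFI
 * status at the boundary to generic kernel code, e.g.
 *
 *	efi_status_t status;
 *
 *	status = efi.get_variable(name, &vendor, NULL, &size, data);
 *	if (status != EFI_SUCCESS)
 *		return efi_status_to_err(status);
 */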
879 
880 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
881 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
882 
883 static int __init efi_memreserve_map_root(void)
884 {
885 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
886 		return -ENODEV;
887 
888 	efi_memreserve_root = memremap(mem_reserve,
889 				       sizeof(*efi_memreserve_root),
890 				       MEMREMAP_WB);
891 	if (WARN_ON_ONCE(!efi_memreserve_root))
892 		return -ENOMEM;
893 	return 0;
894 }
895 
896 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
897 {
898 	struct resource *res, *parent;
899 	int ret;
900 
901 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
902 	if (!res)
903 		return -ENOMEM;
904 
905 	res->name	= "reserved";
906 	res->flags	= IORESOURCE_MEM;
907 	res->start	= addr;
908 	res->end	= addr + size - 1;
909 
910 	/* we expect a conflict with a 'System RAM' region */
911 	parent = request_resource_conflict(&iomem_resource, res);
912 	ret = parent ? request_resource(parent, res) : 0;
913 
914 	/*
915 	 * Given that efi_mem_reserve_iomem() can be called at any
916 	 * time, only call memblock_reserve() if the architecture
917 	 * keeps the infrastructure around.
918 	 */
919 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
920 		memblock_reserve(addr, size);
921 
922 	return ret;
923 }
924 
925 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
926 {
927 	struct linux_efi_memreserve *rsv;
928 	unsigned long prsv;
929 	int rc, index;
930 
931 	if (efi_memreserve_root == (void *)ULONG_MAX)
932 		return -ENODEV;
933 
934 	if (!efi_memreserve_root) {
935 		rc = efi_memreserve_map_root();
936 		if (rc)
937 			return rc;
938 	}
939 
940 	/* first try to find a slot in an existing linked list entry */
941 	for (prsv = efi_memreserve_root->next; prsv; ) {
942 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
943 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
944 		if (index < rsv->size) {
945 			rsv->entry[index].base = addr;
946 			rsv->entry[index].size = size;
947 
948 			memunmap(rsv);
949 			return efi_mem_reserve_iomem(addr, size);
950 		}
951 		prsv = rsv->next;
952 		memunmap(rsv);
953 	}
954 
955 	/* no slot found - allocate a new linked list entry */
956 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
957 	if (!rsv)
958 		return -ENOMEM;
959 
960 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
961 	if (rc) {
962 		free_page((unsigned long)rsv);
963 		return rc;
964 	}
965 
966 	/*
967 	 * The memremap() call above assumes that a linux_efi_memreserve entry
968 	 * never crosses a page boundary, so let's ensure that this remains true
969 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
970 	 * using SZ_4K explicitly in the size calculation below.
971 	 */
972 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
973 	atomic_set(&rsv->count, 1);
974 	rsv->entry[0].base = addr;
975 	rsv->entry[0].size = size;
976 
977 	spin_lock(&efi_mem_reserve_persistent_lock);
978 	rsv->next = efi_memreserve_root->next;
979 	efi_memreserve_root->next = __pa(rsv);
980 	spin_unlock(&efi_mem_reserve_persistent_lock);
981 
982 	return efi_mem_reserve_iomem(addr, size);
983 }
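
/*
 * Minimal usage sketch (hypothetical caller, illustrative values): a driver
 * that hands a firmware-described region to hardware and needs the
 * reservation to survive kexec would do something like
 *
 *	err = efi_mem_reserve_persistent(region_base, region_size);
 *	if (err)
 *		pr_warn("could not persist reservation: %d\n", err);
 */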
984 
985 static int __init efi_memreserve_root_init(void)
986 {
987 	if (efi_memreserve_root)
988 		return 0;
989 	if (efi_memreserve_map_root())
990 		efi_memreserve_root = (void *)ULONG_MAX;
991 	return 0;
992 }
993 early_initcall(efi_memreserve_root_init);
994 
995 #ifdef CONFIG_KEXEC
996 static int update_efi_random_seed(struct notifier_block *nb,
997 				  unsigned long code, void *unused)
998 {
999 	struct linux_efi_random_seed *seed;
1000 	u32 size = 0;
1001 
1002 	if (!kexec_in_progress)
1003 		return NOTIFY_DONE;
1004 
1005 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1006 	if (seed != NULL) {
1007 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1008 		memunmap(seed);
1009 	} else {
1010 		pr_err("Could not map UEFI random seed!\n");
1011 	}
1012 	if (size > 0) {
1013 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1014 				MEMREMAP_WB);
1015 		if (seed != NULL) {
1016 			seed->size = size;
1017 			get_random_bytes(seed->bits, seed->size);
1018 			memunmap(seed);
1019 		} else {
1020 			pr_err("Could not map UEFI random seed!\n");
1021 		}
1022 	}
1023 	return NOTIFY_DONE;
1024 }
1025 
1026 static struct notifier_block efi_random_seed_nb = {
1027 	.notifier_call = update_efi_random_seed,
1028 };
1029 
1030 static int __init register_update_efi_random_seed(void)
1031 {
1032 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1033 		return 0;
1034 	return register_reboot_notifier(&efi_random_seed_nb);
1035 }
1036 late_initcall(register_update_efi_random_seed);
1037 #endif
1038