xref: /openbmc/linux/drivers/firmware/efi/efi.c (revision cbabf03c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
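/*
 * A typical way to get at the variables from userspace, once this code has
 * created the mount point (see the efivarfs documentation):
 *
 *   mount -t efivarfs efivarfs /sys/firmware/efi/efivars
 */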
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/io.h>
25 #include <linux/kexec.h>
26 #include <linux/platform_device.h>
27 #include <linux/random.h>
28 #include <linux/reboot.h>
29 #include <linux/slab.h>
30 #include <linux/acpi.h>
31 #include <linux/ucs2_string.h>
32 #include <linux/memblock.h>
33 #include <linux/security.h>
34 
35 #include <asm/early_ioremap.h>
36 
37 struct efi __read_mostly efi = {
38 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
39 	.acpi			= EFI_INVALID_TABLE_ADDR,
40 	.acpi20			= EFI_INVALID_TABLE_ADDR,
41 	.smbios			= EFI_INVALID_TABLE_ADDR,
42 	.smbios3		= EFI_INVALID_TABLE_ADDR,
43 	.esrt			= EFI_INVALID_TABLE_ADDR,
44 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
45 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
46 #ifdef CONFIG_LOAD_UEFI_KEYS
47 	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
48 #endif
49 #ifdef CONFIG_EFI_COCO_SECRET
50 	.coco_secret		= EFI_INVALID_TABLE_ADDR,
51 #endif
52 };
53 EXPORT_SYMBOL(efi);
54 
55 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
56 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
57 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
58 
59 struct mm_struct efi_mm = {
60 	.mm_rb			= RB_ROOT,
61 	.mm_users		= ATOMIC_INIT(2),
62 	.mm_count		= ATOMIC_INIT(1),
63 	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
64 	MMAP_LOCK_INITIALIZER(efi_mm)
65 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
66 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
67 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
68 };
69 
70 struct workqueue_struct *efi_rts_wq;
71 
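/*
 * "noefi" disables all use of EFI runtime services; the default comes
 * from CONFIG_EFI_DISABLE_RUNTIME and can also be flipped at boot time
 * with "efi=runtime" / "efi=noruntime" (see parse_efi_cmdline() below).
 */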
72 static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
73 static int __init setup_noefi(char *arg)
74 {
75 	disable_runtime = true;
76 	return 0;
77 }
78 early_param("noefi", setup_noefi);
79 
80 bool efi_runtime_disabled(void)
81 {
82 	return disable_runtime;
83 }
84 
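/*
 * EFI_MEMORY_SP "soft reserved" regions are normally kept out of the
 * general memory pool; booting with "efi=nosoftreserve" clears that
 * behaviour and the ranges are treated as ordinary RAM.
 */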
85 bool __pure __efi_soft_reserve_enabled(void)
86 {
87 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
88 }
89 
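/*
 * Parse the "efi=" kernel command line argument.  Options may be combined
 * in a comma-separated list, e.g. "efi=debug,nosoftreserve".
 */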
90 static int __init parse_efi_cmdline(char *str)
91 {
92 	if (!str) {
93 		pr_warn("need at least one option\n");
94 		return -EINVAL;
95 	}
96 
97 	if (parse_option_str(str, "debug"))
98 		set_bit(EFI_DBG, &efi.flags);
99 
100 	if (parse_option_str(str, "noruntime"))
101 		disable_runtime = true;
102 
103 	if (parse_option_str(str, "runtime"))
104 		disable_runtime = false;
105 
106 	if (parse_option_str(str, "nosoftreserve"))
107 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
108 
109 	return 0;
110 }
111 early_param("efi", parse_efi_cmdline);
112 
113 struct kobject *efi_kobj;
114 
115 /*
116  * Keep exposing the systab information that historically snuck into
117  * the efivars driver.
118  * Note: do not add more fields to the systab sysfs file, as that breaks
119  * the sysfs one-value-per-file rule!
120  */
121 static ssize_t systab_show(struct kobject *kobj,
122 			   struct kobj_attribute *attr, char *buf)
123 {
124 	char *str = buf;
125 
126 	if (!kobj || !buf)
127 		return -EINVAL;
128 
129 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
130 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
131 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
132 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
133 	/*
134 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
135 	 * SMBIOS3 entry point shall be preferred, so we list it first to
136 	 * let applications stop parsing after the first match.
137 	 */
138 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
139 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
140 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
141 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
142 
143 	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
144 		str = efi_systab_show_arch(str);
145 
146 	return str - buf;
147 }
148 
149 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
150 
151 static ssize_t fw_platform_size_show(struct kobject *kobj,
152 				     struct kobj_attribute *attr, char *buf)
153 {
154 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
155 }
156 
157 extern __weak struct kobj_attribute efi_attr_fw_vendor;
158 extern __weak struct kobj_attribute efi_attr_runtime;
159 extern __weak struct kobj_attribute efi_attr_config_table;
160 static struct kobj_attribute efi_attr_fw_platform_size =
161 	__ATTR_RO(fw_platform_size);
162 
163 static struct attribute *efi_subsys_attrs[] = {
164 	&efi_attr_systab.attr,
165 	&efi_attr_fw_platform_size.attr,
166 	&efi_attr_fw_vendor.attr,
167 	&efi_attr_runtime.attr,
168 	&efi_attr_config_table.attr,
169 	NULL,
170 };
171 
172 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
173 				   int n)
174 {
175 	return attr->mode;
176 }
177 
178 static const struct attribute_group efi_subsys_attr_group = {
179 	.attrs = efi_subsys_attrs,
180 	.is_visible = efi_attr_is_visible,
181 };
182 
183 static struct efivars generic_efivars;
184 static struct efivar_operations generic_ops;
185 
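/*
 * Wire the firmware's runtime variable services into the generic efivars
 * layer.  The set_variable hooks are only filled in when the firmware
 * advertises SetVariable() support, so read-only setups still work.
 */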
186 static int generic_ops_register(void)
187 {
188 	generic_ops.get_variable = efi.get_variable;
189 	generic_ops.get_next_variable = efi.get_next_variable;
190 	generic_ops.query_variable_store = efi_query_variable_store;
191 
192 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
193 		generic_ops.set_variable = efi.set_variable;
194 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
195 	}
196 	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
197 }
198 
199 static void generic_ops_unregister(void)
200 {
201 	efivars_unregister(&generic_efivars);
202 }
203 
204 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
205 #define EFIVAR_SSDT_NAME_MAX	16
206 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
207 static int __init efivar_ssdt_setup(char *str)
208 {
209 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
210 
211 	if (ret)
212 		return ret;
213 
214 	if (strlen(str) < sizeof(efivar_ssdt))
215 		memcpy(efivar_ssdt, str, strlen(str));
216 	else
217 		pr_warn("efivar_ssdt: name too long: %s\n", str);
218 	return 1;
219 }
220 __setup("efivar_ssdt=", efivar_ssdt_setup);
221 
222 static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
223 				   unsigned long name_size, void *data)
224 {
225 	struct efivar_entry *entry;
226 	struct list_head *list = data;
227 	char utf8_name[EFIVAR_SSDT_NAME_MAX];
228 	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
229 
230 	ucs2_as_utf8(utf8_name, name, limit - 1);
231 	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
232 		return 0;
233 
234 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
235 	if (!entry)
236 		return 0;
237 
238 	memcpy(entry->var.VariableName, name, name_size);
239 	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
240 
241 	efivar_entry_add(entry, list);
242 
243 	return 0;
244 }
245 
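/*
 * Load SSDT overlays from the EFI variable named via "efivar_ssdt=<name>":
 * collect every variable with a matching name, read its contents and feed
 * the data to acpi_load_table().
 */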
246 static __init int efivar_ssdt_load(void)
247 {
248 	LIST_HEAD(entries);
249 	struct efivar_entry *entry, *aux;
250 	unsigned long size;
251 	void *data;
252 	int ret;
253 
254 	if (!efivar_ssdt[0])
255 		return 0;
256 
257 	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
258 
259 	list_for_each_entry_safe(entry, aux, &entries, list) {
260 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
261 			&entry->var.VendorGuid);
262 
263 		list_del(&entry->list);
264 
265 		ret = efivar_entry_size(entry, &size);
266 		if (ret) {
267 			pr_err("failed to get var size\n");
268 			goto free_entry;
269 		}
270 
271 		data = kmalloc(size, GFP_KERNEL);
272 		if (!data) {
273 			ret = -ENOMEM;
274 			goto free_entry;
275 		}
276 
277 		ret = efivar_entry_get(entry, NULL, &size, data);
278 		if (ret) {
279 			pr_err("failed to get var data\n");
280 			goto free_data;
281 		}
282 
283 		ret = acpi_load_table(data, NULL);
284 		if (ret) {
285 			pr_err("failed to load table: %d\n", ret);
286 			goto free_data;
287 		}
288 
289 		goto free_entry;
290 
291 free_data:
292 		kfree(data);
293 
294 free_entry:
295 		kfree(entry);
296 	}
297 
298 	return ret;
299 }
300 #else
301 static inline int efivar_ssdt_load(void) { return 0; }
302 #endif
303 
304 #ifdef CONFIG_DEBUG_FS
305 
306 #define EFI_DEBUGFS_MAX_BLOBS 32
307 
308 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
309 
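/*
 * Expose the preserved EFI boot services code/data regions as read-only
 * blobs under /sys/kernel/debug/efi/.  Only used when booting with
 * "efi=debug" on a system that keeps those regions around.
 */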
310 static void __init efi_debugfs_init(void)
311 {
312 	struct dentry *efi_debugfs;
313 	efi_memory_desc_t *md;
314 	char name[32];
315 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
316 	int i = 0;
317 
318 	efi_debugfs = debugfs_create_dir("efi", NULL);
319 	if (IS_ERR_OR_NULL(efi_debugfs))
320 		return;
321 
322 	for_each_efi_memory_desc(md) {
323 		switch (md->type) {
324 		case EFI_BOOT_SERVICES_CODE:
325 			snprintf(name, sizeof(name), "boot_services_code%d",
326 				 type_count[md->type]++);
327 			break;
328 		case EFI_BOOT_SERVICES_DATA:
329 			snprintf(name, sizeof(name), "boot_services_data%d",
330 				 type_count[md->type]++);
331 			break;
332 		default:
333 			continue;
334 		}
335 
336 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
337 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
338 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
339 			break;
340 		}
341 
342 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
343 		debugfs_blob[i].data = memremap(md->phys_addr,
344 						debugfs_blob[i].size,
345 						MEMREMAP_WB);
346 		if (!debugfs_blob[i].data)
347 			continue;
348 
349 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
350 		i++;
351 	}
352 }
353 #else
354 static inline void efi_debugfs_init(void) {}
355 #endif
356 
357 /*
358  * If the system was booted with EFI, register the efi subsystem with
359  * the firmware subsystem, and the efivars subsystem with the efi
360  * subsystem.
361  */
362 static int __init efisubsys_init(void)
363 {
364 	int error;
365 
366 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
367 		efi.runtime_supported_mask = 0;
368 
369 	if (!efi_enabled(EFI_BOOT))
370 		return 0;
371 
372 	if (efi.runtime_supported_mask) {
373 		/*
374 		 * Since we process only one efi_runtime_service() at a time, an
375 		 * ordered workqueue (which creates only one execution context)
376 		 * should suffice for all our needs.
377 		 */
378 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
379 		if (!efi_rts_wq) {
380 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
381 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
382 			efi.runtime_supported_mask = 0;
383 			return 0;
384 		}
385 	}
386 
387 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
388 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
389 
390 	/* We register the efi directory at /sys/firmware/efi */
391 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
392 	if (!efi_kobj) {
393 		pr_err("efi: Firmware registration failed.\n");
394 		destroy_workqueue(efi_rts_wq);
395 		return -ENOMEM;
396 	}
397 
398 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
399 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
400 		error = generic_ops_register();
401 		if (error)
402 			goto err_put;
403 		efivar_ssdt_load();
404 		platform_device_register_simple("efivars", 0, NULL, 0);
405 	}
406 
407 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
408 	if (error) {
409 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
410 		       error);
411 		goto err_unregister;
412 	}
413 
414 	error = efi_runtime_map_init(efi_kobj);
415 	if (error)
416 		goto err_remove_group;
417 
418 	/* and the standard mountpoint for efivarfs */
419 	error = sysfs_create_mount_point(efi_kobj, "efivars");
420 	if (error) {
421 		pr_err("efivars: Subsystem registration failed.\n");
422 		goto err_remove_group;
423 	}
424 
425 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
426 		efi_debugfs_init();
427 
428 	return 0;
429 
430 err_remove_group:
431 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
432 err_unregister:
433 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
434 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
435 		generic_ops_unregister();
436 err_put:
437 	kobject_put(efi_kobj);
438 	destroy_workqueue(efi_rts_wq);
439 	return error;
440 }
441 
442 subsys_initcall(efisubsys_init);
443 
444 /*
445  * Find the efi memory descriptor for a given physical address.  Given a
446  * physical address, determine if it exists within an EFI Memory Map entry,
447  * and if so, populate the supplied memory descriptor with the appropriate
448  * data.
449  */
450 int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
451 {
452 	efi_memory_desc_t *md;
453 
454 	if (!efi_enabled(EFI_MEMMAP)) {
455 		pr_err_once("EFI_MEMMAP is not enabled.\n");
456 		return -EINVAL;
457 	}
458 
459 	if (!out_md) {
460 		pr_err_once("out_md is null.\n");
461 		return -EINVAL;
462 	}
463 
464 	for_each_efi_memory_desc(md) {
465 		u64 size;
466 		u64 end;
467 
468 		size = md->num_pages << EFI_PAGE_SHIFT;
469 		end = md->phys_addr + size;
470 		if (phys_addr >= md->phys_addr && phys_addr < end) {
471 			memcpy(out_md, md, sizeof(*out_md));
472 			return 0;
473 		}
474 	}
475 	return -ENOENT;
476 }
477 
478 /*
479  * Calculate the highest address of an efi memory descriptor.
480  */
481 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
482 {
483 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
484 	u64 end = md->phys_addr + size;
485 	return end;
486 }
487 
488 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
489 
490 /**
491  * efi_mem_reserve - Reserve an EFI memory region
492  * @addr: Physical address to reserve
493  * @size: Size of reservation
494  *
495  * Mark a region as reserved from general kernel allocation and
496  * prevent it being released by efi_free_boot_services().
497  *
498  * This function should be called by drivers once they've parsed EFI
499  * configuration tables to figure out where their data lives, e.g.
500  * efi_esrt_init().
501  */
502 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
503 {
504 	if (!memblock_is_region_reserved(addr, size))
505 		memblock_reserve(addr, size);
506 
507 	/*
508 	 * Some architectures (x86) reserve all boot services ranges
509 	 * until efi_free_boot_services() because of buggy firmware
510 	 * implementations. This means the above memblock_reserve() is
511 	 * superfluous on x86 and instead what it needs to do is
512 	 * ensure the @start, @size is not freed.
513 	 */
514 	efi_arch_mem_reserve(addr, size);
515 }
516 
517 static const efi_config_table_type_t common_tables[] __initconst = {
518 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
519 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
520 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
521 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
522 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
523 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
524 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
525 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
526 	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
527 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
528 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
529 #ifdef CONFIG_EFI_RCI2_TABLE
530 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
531 #endif
532 #ifdef CONFIG_LOAD_UEFI_KEYS
533 	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
534 #endif
535 #ifdef CONFIG_EFI_COCO_SECRET
536 	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
537 #endif
538 	{},
539 };
540 
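/*
 * Record the address of a configuration table whose GUID matches one of
 * the entries in @table_types.  Returns 1 on a match, 0 otherwise.
 */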
541 static __init int match_config_table(const efi_guid_t *guid,
542 				     unsigned long table,
543 				     const efi_config_table_type_t *table_types)
544 {
545 	int i;
546 
547 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
548 		if (!efi_guidcmp(*guid, table_types[i].guid)) {
549 			*(table_types[i].ptr) = table;
550 			if (table_types[i].name[0])
551 				pr_cont("%s=0x%lx ",
552 					table_types[i].name, table);
553 			return 1;
554 		}
555 	}
556 
557 	return 0;
558 }
559 
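/*
 * Walk the firmware's configuration table array, record the tables we
 * recognise (common_tables plus any architecture specific ones) and act on
 * the Linux specific entries: the RNG seed, the MEMRESERVE list and the
 * RT_PROPERTIES mask of supported runtime services.
 */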
560 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
561 				   int count,
562 				   const efi_config_table_type_t *arch_tables)
563 {
564 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
565 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
566 	const efi_guid_t *guid;
567 	unsigned long table;
568 	int i;
569 
570 	pr_info("");
571 	for (i = 0; i < count; i++) {
572 		if (!IS_ENABLED(CONFIG_X86)) {
573 			guid = &config_tables[i].guid;
574 			table = (unsigned long)config_tables[i].table;
575 		} else if (efi_enabled(EFI_64BIT)) {
576 			guid = &tbl64[i].guid;
577 			table = tbl64[i].table;
578 
579 			if (IS_ENABLED(CONFIG_X86_32) &&
580 			    tbl64[i].table > U32_MAX) {
581 				pr_cont("\n");
582 				pr_err("Table located above 4GB, disabling EFI.\n");
583 				return -EINVAL;
584 			}
585 		} else {
586 			guid = &tbl32[i].guid;
587 			table = tbl32[i].table;
588 		}
589 
590 		if (!match_config_table(guid, table, common_tables) && arch_tables)
591 			match_config_table(guid, table, arch_tables);
592 	}
593 	pr_cont("\n");
594 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
595 
596 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
597 		struct linux_efi_random_seed *seed;
598 		u32 size = 0;
599 
600 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
601 		if (seed != NULL) {
602 			size = READ_ONCE(seed->size);
603 			early_memunmap(seed, sizeof(*seed));
604 		} else {
605 			pr_err("Could not map UEFI random seed!\n");
606 		}
607 		if (size > 0) {
608 			seed = early_memremap(efi_rng_seed,
609 					      sizeof(*seed) + size);
610 			if (seed != NULL) {
611 				pr_notice("seeding entropy pool\n");
612 				add_bootloader_randomness(seed->bits, size);
613 				early_memunmap(seed, sizeof(*seed) + size);
614 			} else {
615 				pr_err("Could not map UEFI random seed!\n");
616 			}
617 		}
618 	}
619 
620 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
621 		efi_memattr_init();
622 
623 	efi_tpm_eventlog_init();
624 
625 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
626 		unsigned long prsv = mem_reserve;
627 
628 		while (prsv) {
629 			struct linux_efi_memreserve *rsv;
630 			u8 *p;
631 
632 			/*
633 			 * Just map a full page: that is what we will get
634 			 * anyway, and it permits us to map the entire entry
635 			 * before knowing its size.
636 			 */
637 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
638 					   PAGE_SIZE);
639 			if (p == NULL) {
640 				pr_err("Could not map UEFI memreserve entry!\n");
641 				return -ENOMEM;
642 			}
643 
644 			rsv = (void *)(p + prsv % PAGE_SIZE);
645 
646 			/* reserve the entry itself */
647 			memblock_reserve(prsv,
648 					 struct_size(rsv, entry, rsv->size));
649 
650 			for (i = 0; i < atomic_read(&rsv->count); i++) {
651 				memblock_reserve(rsv->entry[i].base,
652 						 rsv->entry[i].size);
653 			}
654 
655 			prsv = rsv->next;
656 			early_memunmap(p, PAGE_SIZE);
657 		}
658 	}
659 
660 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
661 		efi_rt_properties_table_t *tbl;
662 
663 		tbl = early_memremap(rt_prop, sizeof(*tbl));
664 		if (tbl) {
665 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
666 			early_memunmap(tbl, sizeof(*tbl));
667 		}
668 	}
669 
670 	return 0;
671 }
672 
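/*
 * Sanity check the EFI system table header: a bad signature is fatal, an
 * older revision than expected is merely complained about.
 */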
673 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
674 				   int min_major_version)
675 {
676 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
677 		pr_err("System table signature incorrect!\n");
678 		return -EINVAL;
679 	}
680 
681 	if ((systab_hdr->revision >> 16) < min_major_version)
682 		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
683 		       systab_hdr->revision >> 16,
684 		       systab_hdr->revision & 0xffff,
685 		       min_major_version);
686 
687 	return 0;
688 }
689 
690 #ifndef CONFIG_IA64
691 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
692 						size_t size)
693 {
694 	const efi_char16_t *ret;
695 
696 	ret = early_memremap_ro(fw_vendor, size);
697 	if (!ret)
698 		pr_err("Could not map the firmware vendor!\n");
699 	return ret;
700 }
701 
702 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
703 {
704 	early_memunmap((void *)fw_vendor, size);
705 }
706 #else
707 #define map_fw_vendor(p, s)	__va(p)
708 #define unmap_fw_vendor(v, s)
709 #endif
710 
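/*
 * Log the firmware vendor and UEFI revision.  On x86-64, Apple firmware
 * claiming a revision newer than EFI 1.10 is pinned back to the 1.10
 * runtime services interface.
 */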
711 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
712 				     unsigned long fw_vendor)
713 {
714 	char vendor[100] = "unknown";
715 	const efi_char16_t *c16;
716 	size_t i;
717 
718 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
719 	if (c16) {
720 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
721 			vendor[i] = c16[i];
722 		vendor[i] = '\0';
723 
724 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
725 	}
726 
727 	pr_info("EFI v%u.%.02u by %s\n",
728 		systab_hdr->revision >> 16,
729 		systab_hdr->revision & 0xffff,
730 		vendor);
731 
732 	if (IS_ENABLED(CONFIG_X86_64) &&
733 	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
734 	    !strcmp(vendor, "Apple")) {
735 		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
736 		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
737 	}
738 }
739 
740 static __initdata char memory_type_name[][13] = {
741 	"Reserved",
742 	"Loader Code",
743 	"Loader Data",
744 	"Boot Code",
745 	"Boot Data",
746 	"Runtime Code",
747 	"Runtime Data",
748 	"Conventional",
749 	"Unusable",
750 	"ACPI Reclaim",
751 	"ACPI Mem NVS",
752 	"MMIO",
753 	"MMIO Port",
754 	"PAL Code",
755 	"Persistent",
756 };
757 
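/*
 * Format a memory descriptor's type and attribute bits into @buf, as used
 * when dumping the EFI memory map.  Attribute bits we do not recognise are
 * printed as a raw hexadecimal mask instead of the per-flag breakdown.
 */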
758 char * __init efi_md_typeattr_format(char *buf, size_t size,
759 				     const efi_memory_desc_t *md)
760 {
761 	char *pos;
762 	int type_len;
763 	u64 attr;
764 
765 	pos = buf;
766 	if (md->type >= ARRAY_SIZE(memory_type_name))
767 		type_len = snprintf(pos, size, "[type=%u", md->type);
768 	else
769 		type_len = snprintf(pos, size, "[%-*s",
770 				    (int)(sizeof(memory_type_name[0]) - 1),
771 				    memory_type_name[md->type]);
772 	if (type_len >= size)
773 		return buf;
774 
775 	pos += type_len;
776 	size -= type_len;
777 
778 	attr = md->attribute;
779 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
780 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
781 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
782 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
783 		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
784 		snprintf(pos, size, "|attr=0x%016llx]",
785 			 (unsigned long long)attr);
786 	else
787 		snprintf(pos, size,
788 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
789 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
790 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
791 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
792 			 attr & EFI_MEMORY_SP			? "SP"  : "",
793 			 attr & EFI_MEMORY_NV			? "NV"  : "",
794 			 attr & EFI_MEMORY_XP			? "XP"  : "",
795 			 attr & EFI_MEMORY_RP			? "RP"  : "",
796 			 attr & EFI_MEMORY_WP			? "WP"  : "",
797 			 attr & EFI_MEMORY_RO			? "RO"  : "",
798 			 attr & EFI_MEMORY_UCE			? "UCE" : "",
799 			 attr & EFI_MEMORY_WB			? "WB"  : "",
800 			 attr & EFI_MEMORY_WT			? "WT"  : "",
801 			 attr & EFI_MEMORY_WC			? "WC"  : "",
802 			 attr & EFI_MEMORY_UC			? "UC"  : "");
803 	return buf;
804 }
805 
806 /*
807  * IA64 has a funky EFI memory map that doesn't work the same way as
808  * other architectures.
809  */
810 #ifndef CONFIG_IA64
811 /*
812  * efi_mem_attributes - lookup memmap attributes for physical address
813  * @phys_addr: the physical address to lookup
814  *
815  * Search in the EFI memory map for the region covering
816  * @phys_addr. Returns the EFI memory attributes if the region
817  * was found in the memory map, 0 otherwise.
818  */
819 u64 efi_mem_attributes(unsigned long phys_addr)
820 {
821 	efi_memory_desc_t *md;
822 
823 	if (!efi_enabled(EFI_MEMMAP))
824 		return 0;
825 
826 	for_each_efi_memory_desc(md) {
827 		if ((md->phys_addr <= phys_addr) &&
828 		    (phys_addr < (md->phys_addr +
829 		    (md->num_pages << EFI_PAGE_SHIFT))))
830 			return md->attribute;
831 	}
832 	return 0;
833 }
834 
835 /*
836  * efi_mem_type - lookup memmap type for physical address
837  * @phys_addr: the physical address to lookup
838  *
839  * Search in the EFI memory map for the region covering @phys_addr.
840  * Returns the EFI memory type if the region was found in the memory
841  * map, -EINVAL otherwise.
842  */
843 int efi_mem_type(unsigned long phys_addr)
844 {
845 	const efi_memory_desc_t *md;
846 
847 	if (!efi_enabled(EFI_MEMMAP))
848 		return -ENOTSUPP;
849 
850 	for_each_efi_memory_desc(md) {
851 		if ((md->phys_addr <= phys_addr) &&
852 		    (phys_addr < (md->phys_addr +
853 				  (md->num_pages << EFI_PAGE_SHIFT))))
854 			return md->type;
855 	}
856 	return -EINVAL;
857 }
858 #endif
859 
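/*
 * Translate an EFI_* status code into a negative errno; anything not
 * handled explicitly becomes -EINVAL.
 */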
860 int efi_status_to_err(efi_status_t status)
861 {
862 	int err;
863 
864 	switch (status) {
865 	case EFI_SUCCESS:
866 		err = 0;
867 		break;
868 	case EFI_INVALID_PARAMETER:
869 		err = -EINVAL;
870 		break;
871 	case EFI_OUT_OF_RESOURCES:
872 		err = -ENOSPC;
873 		break;
874 	case EFI_DEVICE_ERROR:
875 		err = -EIO;
876 		break;
877 	case EFI_WRITE_PROTECTED:
878 		err = -EROFS;
879 		break;
880 	case EFI_SECURITY_VIOLATION:
881 		err = -EACCES;
882 		break;
883 	case EFI_NOT_FOUND:
884 		err = -ENOENT;
885 		break;
886 	case EFI_ABORTED:
887 		err = -EINTR;
888 		break;
889 	default:
890 		err = -EINVAL;
891 	}
892 
893 	return err;
894 }
895 
896 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
897 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
898 
899 static int __init efi_memreserve_map_root(void)
900 {
901 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
902 		return -ENODEV;
903 
904 	efi_memreserve_root = memremap(mem_reserve,
905 				       sizeof(*efi_memreserve_root),
906 				       MEMREMAP_WB);
907 	if (WARN_ON_ONCE(!efi_memreserve_root))
908 		return -ENOMEM;
909 	return 0;
910 }
911 
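/*
 * Claim the range in the iomem resource tree as "reserved" (we expect to
 * nest under an existing 'System RAM' region) and, on architectures that
 * keep memblock around after boot, reserve it there too.
 */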
912 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
913 {
914 	struct resource *res, *parent;
915 	int ret;
916 
917 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
918 	if (!res)
919 		return -ENOMEM;
920 
921 	res->name	= "reserved";
922 	res->flags	= IORESOURCE_MEM;
923 	res->start	= addr;
924 	res->end	= addr + size - 1;
925 
926 	/* we expect a conflict with a 'System RAM' region */
927 	parent = request_resource_conflict(&iomem_resource, res);
928 	ret = parent ? request_resource(parent, res) : 0;
929 
930 	/*
931 	 * Given that efi_mem_reserve_iomem() can be called at any
932 	 * time, only call memblock_reserve() if the architecture
933 	 * keeps the infrastructure around.
934 	 */
935 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
936 		memblock_reserve(addr, size);
937 
938 	return ret;
939 }
940 
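/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE linked list so that it
 * survives kexec: reuse a free slot in an existing entry if possible,
 * otherwise allocate a new page-sized entry and link it into the list.
 */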
941 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
942 {
943 	struct linux_efi_memreserve *rsv;
944 	unsigned long prsv;
945 	int rc, index;
946 
947 	if (efi_memreserve_root == (void *)ULONG_MAX)
948 		return -ENODEV;
949 
950 	if (!efi_memreserve_root) {
951 		rc = efi_memreserve_map_root();
952 		if (rc)
953 			return rc;
954 	}
955 
956 	/* first try to find a slot in an existing linked list entry */
957 	for (prsv = efi_memreserve_root->next; prsv; ) {
958 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
959 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
960 		if (index < rsv->size) {
961 			rsv->entry[index].base = addr;
962 			rsv->entry[index].size = size;
963 
964 			memunmap(rsv);
965 			return efi_mem_reserve_iomem(addr, size);
966 		}
967 		prsv = rsv->next;
968 		memunmap(rsv);
969 	}
970 
971 	/* no slot found - allocate a new linked list entry */
972 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
973 	if (!rsv)
974 		return -ENOMEM;
975 
976 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
977 	if (rc) {
978 		free_page((unsigned long)rsv);
979 		return rc;
980 	}
981 
982 	/*
983 	 * The memremap() call above assumes that a linux_efi_memreserve entry
984 	 * never crosses a page boundary, so let's ensure that this remains true
985 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
986 	 * using SZ_4K explicitly in the size calculation below.
987 	 */
988 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
989 	atomic_set(&rsv->count, 1);
990 	rsv->entry[0].base = addr;
991 	rsv->entry[0].size = size;
992 
993 	spin_lock(&efi_mem_reserve_persistent_lock);
994 	rsv->next = efi_memreserve_root->next;
995 	efi_memreserve_root->next = __pa(rsv);
996 	spin_unlock(&efi_mem_reserve_persistent_lock);
997 
998 	return efi_mem_reserve_iomem(addr, size);
999 }
1000 
1001 static int __init efi_memreserve_root_init(void)
1002 {
1003 	if (efi_memreserve_root)
1004 		return 0;
1005 	if (efi_memreserve_map_root())
1006 		efi_memreserve_root = (void *)ULONG_MAX;
1007 	return 0;
1008 }
1009 early_initcall(efi_memreserve_root_init);
1010 
1011 #ifdef CONFIG_KEXEC
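/*
 * Refresh the EFI random seed table with fresh entropy just before kexec,
 * so the next kernel does not start from a seed this one already consumed.
 */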
1012 static int update_efi_random_seed(struct notifier_block *nb,
1013 				  unsigned long code, void *unused)
1014 {
1015 	struct linux_efi_random_seed *seed;
1016 	u32 size = 0;
1017 
1018 	if (!kexec_in_progress)
1019 		return NOTIFY_DONE;
1020 
1021 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1022 	if (seed != NULL) {
1023 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1024 		memunmap(seed);
1025 	} else {
1026 		pr_err("Could not map UEFI random seed!\n");
1027 	}
1028 	if (size > 0) {
1029 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1030 				MEMREMAP_WB);
1031 		if (seed != NULL) {
1032 			seed->size = size;
1033 			get_random_bytes(seed->bits, seed->size);
1034 			memunmap(seed);
1035 		} else {
1036 			pr_err("Could not map UEFI random seed!\n");
1037 		}
1038 	}
1039 	return NOTIFY_DONE;
1040 }
1041 
1042 static struct notifier_block efi_random_seed_nb = {
1043 	.notifier_call = update_efi_random_seed,
1044 };
1045 
1046 static int __init register_update_efi_random_seed(void)
1047 {
1048 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1049 		return 0;
1050 	return register_reboot_notifier(&efi_random_seed_nb);
1051 }
1052 late_initcall(register_update_efi_random_seed);
1053 #endif
1054