xref: /openbmc/linux/drivers/firmware/efi/efi.c (revision 6aeadf78)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/initrd.h>
25 #include <linux/io.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 
36 #include <asm/early_ioremap.h>
37 
38 struct efi __read_mostly efi = {
39 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
40 	.acpi			= EFI_INVALID_TABLE_ADDR,
41 	.acpi20			= EFI_INVALID_TABLE_ADDR,
42 	.smbios			= EFI_INVALID_TABLE_ADDR,
43 	.smbios3		= EFI_INVALID_TABLE_ADDR,
44 	.esrt			= EFI_INVALID_TABLE_ADDR,
45 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
46 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
47 #ifdef CONFIG_LOAD_UEFI_KEYS
48 	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
49 #endif
50 #ifdef CONFIG_EFI_COCO_SECRET
51 	.coco_secret		= EFI_INVALID_TABLE_ADDR,
52 #endif
53 #ifdef CONFIG_UNACCEPTED_MEMORY
54 	.unaccepted		= EFI_INVALID_TABLE_ADDR,
55 #endif
56 };
57 EXPORT_SYMBOL(efi);
58 
59 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
60 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
61 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
62 static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
63 
64 extern unsigned long screen_info_table;
65 
66 struct mm_struct efi_mm = {
67 	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
68 	.mm_users		= ATOMIC_INIT(2),
69 	.mm_count		= ATOMIC_INIT(1),
70 	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
71 	MMAP_LOCK_INITIALIZER(efi_mm)
72 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
73 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
74 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
75 };
76 
77 struct workqueue_struct *efi_rts_wq;
78 
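/*
 * "noefi" on the kernel command line disables calls into EFI runtime
 * services, as does "efi=noruntime" below; "efi=runtime" re-enables them
 * when the default (CONFIG_EFI_DISABLE_RUNTIME) is to keep them off.
 */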
79 static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
80 static int __init setup_noefi(char *arg)
81 {
82 	disable_runtime = true;
83 	return 0;
84 }
85 early_param("noefi", setup_noefi);
86 
87 bool efi_runtime_disabled(void)
88 {
89 	return disable_runtime;
90 }
91 
92 bool __pure __efi_soft_reserve_enabled(void)
93 {
94 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
95 }
96 
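/*
 * "efi=" command line option handling: "debug" sets EFI_DBG, "noruntime"
 * and "runtime" disable/enable the runtime services, and "nosoftreserve"
 * sets EFI_MEM_NO_SOFT_RESERVE so EFI soft reservations are not honoured.
 */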
97 static int __init parse_efi_cmdline(char *str)
98 {
99 	if (!str) {
100 		pr_warn("need at least one option\n");
101 		return -EINVAL;
102 	}
103 
104 	if (parse_option_str(str, "debug"))
105 		set_bit(EFI_DBG, &efi.flags);
106 
107 	if (parse_option_str(str, "noruntime"))
108 		disable_runtime = true;
109 
110 	if (parse_option_str(str, "runtime"))
111 		disable_runtime = false;
112 
113 	if (parse_option_str(str, "nosoftreserve"))
114 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
115 
116 	return 0;
117 }
118 early_param("efi", parse_efi_cmdline);
119 
120 struct kobject *efi_kobj;
121 
122 /*
123  * Let's not leave out the systab information that snuck into
124  * the efivars driver.
125  * Note: do not add more fields to the systab sysfs file, as that would
126  * break the sysfs one-value-per-file rule!
127  */
128 static ssize_t systab_show(struct kobject *kobj,
129 			   struct kobj_attribute *attr, char *buf)
130 {
131 	char *str = buf;
132 
133 	if (!kobj || !buf)
134 		return -EINVAL;
135 
136 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
137 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
138 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
139 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
140 	/*
141 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
142 	 * SMBIOS3 entry point shall be preferred, so we list it first to
143 	 * let applications stop parsing after the first match.
144 	 */
145 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
146 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
147 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
148 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
149 
150 	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
151 		str = efi_systab_show_arch(str);
152 
153 	return str - buf;
154 }
155 
156 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
157 
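/* /sys/firmware/efi/fw_platform_size: report whether the firmware is 32- or 64-bit. */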
158 static ssize_t fw_platform_size_show(struct kobject *kobj,
159 				     struct kobj_attribute *attr, char *buf)
160 {
161 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
162 }
163 
164 extern __weak struct kobj_attribute efi_attr_fw_vendor;
165 extern __weak struct kobj_attribute efi_attr_runtime;
166 extern __weak struct kobj_attribute efi_attr_config_table;
167 static struct kobj_attribute efi_attr_fw_platform_size =
168 	__ATTR_RO(fw_platform_size);
169 
170 static struct attribute *efi_subsys_attrs[] = {
171 	&efi_attr_systab.attr,
172 	&efi_attr_fw_platform_size.attr,
173 	&efi_attr_fw_vendor.attr,
174 	&efi_attr_runtime.attr,
175 	&efi_attr_config_table.attr,
176 	NULL,
177 };
178 
179 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
180 				   int n)
181 {
182 	return attr->mode;
183 }
184 
185 static const struct attribute_group efi_subsys_attr_group = {
186 	.attrs = efi_subsys_attrs,
187 	.is_visible = efi_attr_is_visible,
188 };
189 
190 static struct efivars generic_efivars;
191 static struct efivar_operations generic_ops;
192 
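/*
 * Probe for the variable runtime services by issuing a GetNextVariableName()
 * call with a single-character buffer: EFI_UNSUPPORTED means the firmware
 * does not implement the variable services at all.
 */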
193 static bool generic_ops_supported(void)
194 {
195 	unsigned long name_size;
196 	efi_status_t status;
197 	efi_char16_t name;
198 	efi_guid_t guid;
199 
200 	name_size = sizeof(name);
201 
202 	status = efi.get_next_variable(&name_size, &name, &guid);
203 	if (status == EFI_UNSUPPORTED)
204 		return false;
205 
206 	return true;
207 }
208 
209 static int generic_ops_register(void)
210 {
211 	if (!generic_ops_supported())
212 		return 0;
213 
214 	generic_ops.get_variable = efi.get_variable;
215 	generic_ops.get_next_variable = efi.get_next_variable;
216 	generic_ops.query_variable_store = efi_query_variable_store;
217 
218 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
219 		generic_ops.set_variable = efi.set_variable;
220 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
221 	}
222 	return efivars_register(&generic_efivars, &generic_ops);
223 }
224 
225 static void generic_ops_unregister(void)
226 {
227 	if (!generic_ops.get_variable)
228 		return;
229 
230 	efivars_unregister(&generic_efivars);
231 }
232 
233 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
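/*
 * "efivar_ssdt=" names an EFI variable whose contents are loaded as an ACPI
 * SSDT at boot (refused when the kernel is locked down for ACPI table
 * loading).  efivar_ssdt_load() walks the variable store looking for
 * matching names and feeds each match to acpi_load_table().
 */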
234 #define EFIVAR_SSDT_NAME_MAX	16UL
235 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
236 static int __init efivar_ssdt_setup(char *str)
237 {
238 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
239 
240 	if (ret)
241 		return ret;
242 
243 	if (strlen(str) < sizeof(efivar_ssdt))
244 		memcpy(efivar_ssdt, str, strlen(str));
245 	else
246 		pr_warn("efivar_ssdt: name too long: %s\n", str);
247 	return 1;
248 }
249 __setup("efivar_ssdt=", efivar_ssdt_setup);
250 
251 static __init int efivar_ssdt_load(void)
252 {
253 	unsigned long name_size = 256;
254 	efi_char16_t *name = NULL;
255 	efi_status_t status;
256 	efi_guid_t guid;
257 
258 	if (!efivar_ssdt[0])
259 		return 0;
260 
261 	name = kzalloc(name_size, GFP_KERNEL);
262 	if (!name)
263 		return -ENOMEM;
264 
265 	for (;;) {
266 		char utf8_name[EFIVAR_SSDT_NAME_MAX];
267 		unsigned long data_size = 0;
268 		void *data;
269 		int limit;
270 
271 		status = efi.get_next_variable(&name_size, name, &guid);
272 		if (status == EFI_NOT_FOUND) {
273 			break;
274 		} else if (status == EFI_BUFFER_TOO_SMALL) {
275 			name = krealloc(name, name_size, GFP_KERNEL);
276 			if (!name)
277 				return -ENOMEM;
278 			continue;
279 		}
280 
281 		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
282 		ucs2_as_utf8(utf8_name, name, limit - 1);
283 		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
284 			continue;
285 
286 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
287 
288 		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
289 		if (status != EFI_BUFFER_TOO_SMALL || !data_size)
290 			return -EIO;
291 
292 		data = kmalloc(data_size, GFP_KERNEL);
293 		if (!data)
294 			return -ENOMEM;
295 
296 		status = efi.get_variable(name, &guid, NULL, &data_size, data);
297 		if (status == EFI_SUCCESS) {
298 			acpi_status ret = acpi_load_table(data, NULL);
299 			if (ret)
300 				pr_err("failed to load table: %u\n", ret);
301 			else
302 				continue;
303 		} else {
304 			pr_err("failed to get var data: 0x%lx\n", status);
305 		}
306 		kfree(data);
307 	}
308 	return 0;
309 }
310 #else
311 static inline int efivar_ssdt_load(void) { return 0; }
312 #endif
313 
314 #ifdef CONFIG_DEBUG_FS
315 
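/*
 * Expose the EFI boot services code/data regions preserved by the
 * architecture as read-only blobs under /sys/kernel/debug/efi/, limited to
 * EFI_DEBUGFS_MAX_BLOBS entries.
 */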
316 #define EFI_DEBUGFS_MAX_BLOBS 32
317 
318 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
319 
320 static void __init efi_debugfs_init(void)
321 {
322 	struct dentry *efi_debugfs;
323 	efi_memory_desc_t *md;
324 	char name[32];
325 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
326 	int i = 0;
327 
328 	efi_debugfs = debugfs_create_dir("efi", NULL);
329 	if (IS_ERR_OR_NULL(efi_debugfs))
330 		return;
331 
332 	for_each_efi_memory_desc(md) {
333 		switch (md->type) {
334 		case EFI_BOOT_SERVICES_CODE:
335 			snprintf(name, sizeof(name), "boot_services_code%d",
336 				 type_count[md->type]++);
337 			break;
338 		case EFI_BOOT_SERVICES_DATA:
339 			snprintf(name, sizeof(name), "boot_services_data%d",
340 				 type_count[md->type]++);
341 			break;
342 		default:
343 			continue;
344 		}
345 
346 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
347 			pr_warn("More than %d EFI boot service segments, only showing the first %d in debugfs\n",
348 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
349 			break;
350 		}
351 
352 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
353 		debugfs_blob[i].data = memremap(md->phys_addr,
354 						debugfs_blob[i].size,
355 						MEMREMAP_WB);
356 		if (!debugfs_blob[i].data)
357 			continue;
358 
359 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
360 		i++;
361 	}
362 }
363 #else
364 static inline void efi_debugfs_init(void) {}
365 #endif
366 
367 /*
368  * We register the efi subsystem with the firmware subsystem and the
369  * efivars subsystem with the efi subsystem, if the system was booted with
370  * EFI.
371  */
372 static int __init efisubsys_init(void)
373 {
374 	int error;
375 
376 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
377 		efi.runtime_supported_mask = 0;
378 
379 	if (!efi_enabled(EFI_BOOT))
380 		return 0;
381 
382 	if (efi.runtime_supported_mask) {
383 		/*
384 		 * Since we process only one efi_runtime_service() at a time, an
385 		 * ordered workqueue (which creates only one execution context)
386 		 * should suffice for all our needs.
387 		 */
388 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
389 		if (!efi_rts_wq) {
390 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
391 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
392 			efi.runtime_supported_mask = 0;
393 			return 0;
394 		}
395 	}
396 
397 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
398 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
399 
400 	/* We register the efi directory at /sys/firmware/efi */
401 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
402 	if (!efi_kobj) {
403 		pr_err("efi: Firmware registration failed.\n");
404 		error = -ENOMEM;
405 		goto err_destroy_wq;
406 	}
407 
408 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
409 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
410 		error = generic_ops_register();
411 		if (error)
412 			goto err_put;
413 		efivar_ssdt_load();
414 		platform_device_register_simple("efivars", 0, NULL, 0);
415 	}
416 
417 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
418 	if (error) {
419 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
420 		       error);
421 		goto err_unregister;
422 	}
423 
424 	/* and the standard mountpoint for efivarfs */
425 	error = sysfs_create_mount_point(efi_kobj, "efivars");
426 	if (error) {
427 		pr_err("efivars: Subsystem registration failed.\n");
428 		goto err_remove_group;
429 	}
430 
431 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
432 		efi_debugfs_init();
433 
434 #ifdef CONFIG_EFI_COCO_SECRET
435 	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
436 		platform_device_register_simple("efi_secret", 0, NULL, 0);
437 #endif
438 
439 	return 0;
440 
441 err_remove_group:
442 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
443 err_unregister:
444 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
445 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
446 		generic_ops_unregister();
447 err_put:
448 	kobject_put(efi_kobj);
449 	efi_kobj = NULL;
450 err_destroy_wq:
451 	if (efi_rts_wq)
452 		destroy_workqueue(efi_rts_wq);
453 
454 	return error;
455 }
456 
457 subsys_initcall(efisubsys_init);
458 
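/*
 * Scan the EFI memory map and mark every region carrying the
 * EFI_MEMORY_MORE_RELIABLE attribute as mirrored in memblock, then report
 * how much of the total memory is mirrored.
 */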
459 void __init efi_find_mirror(void)
460 {
461 	efi_memory_desc_t *md;
462 	u64 mirror_size = 0, total_size = 0;
463 
464 	if (!efi_enabled(EFI_MEMMAP))
465 		return;
466 
467 	for_each_efi_memory_desc(md) {
468 		unsigned long long start = md->phys_addr;
469 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
470 
471 		total_size += size;
472 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
473 			memblock_mark_mirror(start, size);
474 			mirror_size += size;
475 		}
476 	}
477 	if (mirror_size)
478 		pr_info("Memory: %lldM/%lldM mirrored memory\n",
479 			mirror_size>>20, total_size>>20);
480 }
481 
482 /*
483  * Find the EFI memory descriptor covering a given physical address: if the
484  * address falls within an EFI memory map entry, copy that entry into the
485  * supplied memory descriptor.
487  */
488 int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
489 {
490 	efi_memory_desc_t *md;
491 
492 	if (!efi_enabled(EFI_MEMMAP)) {
493 		pr_err_once("EFI_MEMMAP is not enabled.\n");
494 		return -EINVAL;
495 	}
496 
497 	if (!out_md) {
498 		pr_err_once("out_md is null.\n");
499 		return -EINVAL;
500 	}
501 
502 	for_each_efi_memory_desc(md) {
503 		u64 size;
504 		u64 end;
505 
506 		/* skip bogus entries (including empty ones) */
507 		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
508 		    (md->num_pages <= 0) ||
509 		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
510 			continue;
511 
512 		size = md->num_pages << EFI_PAGE_SHIFT;
513 		end = md->phys_addr + size;
514 		if (phys_addr >= md->phys_addr && phys_addr < end) {
515 			memcpy(out_md, md, sizeof(*out_md));
516 			return 0;
517 		}
518 	}
519 	return -ENOENT;
520 }
521 
522 extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
523 	__weak __alias(__efi_mem_desc_lookup);
524 
525 /*
526  * Calculate the highest address of an efi memory descriptor.
527  */
528 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
529 {
530 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
531 	u64 end = md->phys_addr + size;
532 	return end;
533 }
534 
535 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
536 
537 /**
538  * efi_mem_reserve - Reserve an EFI memory region
539  * @addr: Physical address to reserve
540  * @size: Size of reservation
541  *
542  * Mark a region as reserved from general kernel allocation and
543  * prevent it being released by efi_free_boot_services().
544  *
545  * This function should be called by drivers once they've parsed EFI
546  * configuration tables to figure out where their data lives, e.g.
547  * efi_esrt_init().
548  */
549 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
550 {
551 	/* efi_mem_reserve() does not work under Xen */
552 	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
553 		return;
554 
555 	if (!memblock_is_region_reserved(addr, size))
556 		memblock_reserve(addr, size);
557 
558 	/*
559 	 * Some architectures (x86) reserve all boot services ranges
560 	 * until efi_free_boot_services() because of buggy firmware
561 	 * implementations. This means the above memblock_reserve() is
562 	 * superfluous on x86; what the efi_arch_mem_reserve() hook needs to
563 	 * do instead is ensure that the @addr/@size range is not freed.
564 	 */
565 	efi_arch_mem_reserve(addr, size);
566 }
567 
568 static const efi_config_table_type_t common_tables[] __initconst = {
569 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
570 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
571 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
572 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
573 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
574 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
575 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
576 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
577 	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
578 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
579 	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
580 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
581 #ifdef CONFIG_EFI_RCI2_TABLE
582 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
583 #endif
584 #ifdef CONFIG_LOAD_UEFI_KEYS
585 	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
586 #endif
587 #ifdef CONFIG_EFI_COCO_SECRET
588 	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
589 #endif
590 #ifdef CONFIG_UNACCEPTED_MEMORY
591 	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
592 #endif
593 #ifdef CONFIG_EFI_GENERIC_STUB
594 	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table			},
595 #endif
596 	{},
597 };
598 
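/*
 * Match a configuration table GUID against the known table types and, if the
 * table is deemed usable, record its address in the corresponding pointer.
 * Returns 1 if the GUID was recognised (whether or not the table was usable),
 * 0 otherwise.
 */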
599 static __init int match_config_table(const efi_guid_t *guid,
600 				     unsigned long table,
601 				     const efi_config_table_type_t *table_types)
602 {
603 	int i;
604 
605 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
606 		if (efi_guidcmp(*guid, table_types[i].guid))
607 			continue;
608 
609 		if (!efi_config_table_is_usable(guid, table)) {
610 			if (table_types[i].name[0])
611 				pr_cont("(%s=0x%lx unusable) ",
612 					table_types[i].name, table);
613 			return 1;
614 		}
615 
616 		*(table_types[i].ptr) = table;
617 		if (table_types[i].name[0])
618 			pr_cont("%s=0x%lx ", table_types[i].name, table);
619 		return 1;
620 	}
621 
622 	return 0;
623 }
624 
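/*
 * Walk the firmware-provided array of configuration tables (handling both the
 * 32-bit and 64-bit layouts on x86), record the tables we know about, and then
 * consume the Linux-specific ones: the RNG seed, the MEMRESERVE list, the RT
 * properties table, the initrd descriptor and the unaccepted memory table.
 */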
625 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
626 				   int count,
627 				   const efi_config_table_type_t *arch_tables)
628 {
629 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
630 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
631 	const efi_guid_t *guid;
632 	unsigned long table;
633 	int i;
634 
635 	pr_info("");
636 	for (i = 0; i < count; i++) {
637 		if (!IS_ENABLED(CONFIG_X86)) {
638 			guid = &config_tables[i].guid;
639 			table = (unsigned long)config_tables[i].table;
640 		} else if (efi_enabled(EFI_64BIT)) {
641 			guid = &tbl64[i].guid;
642 			table = tbl64[i].table;
643 
644 			if (IS_ENABLED(CONFIG_X86_32) &&
645 			    tbl64[i].table > U32_MAX) {
646 				pr_cont("\n");
647 				pr_err("Table located above 4GB, disabling EFI.\n");
648 				return -EINVAL;
649 			}
650 		} else {
651 			guid = &tbl32[i].guid;
652 			table = tbl32[i].table;
653 		}
654 
655 		if (!match_config_table(guid, table, common_tables) && arch_tables)
656 			match_config_table(guid, table, arch_tables);
657 	}
658 	pr_cont("\n");
659 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
660 
661 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
662 		struct linux_efi_random_seed *seed;
663 		u32 size = 0;
664 
665 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
666 		if (seed != NULL) {
667 			size = min_t(u32, seed->size, SZ_1K); // sanity check
668 			early_memunmap(seed, sizeof(*seed));
669 		} else {
670 			pr_err("Could not map UEFI random seed!\n");
671 		}
672 		if (size > 0) {
673 			seed = early_memremap(efi_rng_seed,
674 					      sizeof(*seed) + size);
675 			if (seed != NULL) {
676 				add_bootloader_randomness(seed->bits, size);
677 				memzero_explicit(seed->bits, size);
678 				early_memunmap(seed, sizeof(*seed) + size);
679 			} else {
680 				pr_err("Could not map UEFI random seed!\n");
681 			}
682 		}
683 	}
684 
685 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
686 		efi_memattr_init();
687 
688 	efi_tpm_eventlog_init();
689 
690 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
691 		unsigned long prsv = mem_reserve;
692 
693 		while (prsv) {
694 			struct linux_efi_memreserve *rsv;
695 			u8 *p;
696 
697 			/*
698 			 * Just map a full page: that is what we will get
699 			 * anyway, and it permits us to map the entire entry
700 			 * before knowing its size.
701 			 */
702 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
703 					   PAGE_SIZE);
704 			if (p == NULL) {
705 				pr_err("Could not map UEFI memreserve entry!\n");
706 				return -ENOMEM;
707 			}
708 
709 			rsv = (void *)(p + prsv % PAGE_SIZE);
710 
711 			/* reserve the entry itself */
712 			memblock_reserve(prsv,
713 					 struct_size(rsv, entry, rsv->size));
714 
715 			for (i = 0; i < atomic_read(&rsv->count); i++) {
716 				memblock_reserve(rsv->entry[i].base,
717 						 rsv->entry[i].size);
718 			}
719 
720 			prsv = rsv->next;
721 			early_memunmap(p, PAGE_SIZE);
722 		}
723 	}
724 
725 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
726 		efi_rt_properties_table_t *tbl;
727 
728 		tbl = early_memremap(rt_prop, sizeof(*tbl));
729 		if (tbl) {
730 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
731 			early_memunmap(tbl, sizeof(*tbl));
732 		}
733 	}
734 
735 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
736 	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
737 		struct linux_efi_initrd *tbl;
738 
739 		tbl = early_memremap(initrd, sizeof(*tbl));
740 		if (tbl) {
741 			phys_initrd_start = tbl->base;
742 			phys_initrd_size = tbl->size;
743 			early_memunmap(tbl, sizeof(*tbl));
744 		}
745 	}
746 
747 	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
748 	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
749 		struct efi_unaccepted_memory *unaccepted;
750 
751 		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
752 		if (unaccepted) {
753 			unsigned long size;
754 
755 			if (unaccepted->version == 1) {
756 				size = sizeof(*unaccepted) + unaccepted->size;
757 				memblock_reserve(efi.unaccepted, size);
758 			} else {
759 				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
760 			}
761 
762 			early_memunmap(unaccepted, sizeof(*unaccepted));
763 		}
764 	}
765 
766 	return 0;
767 }
768 
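/* Sanity-check the EFI system table header by verifying its signature. */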
769 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
770 {
771 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
772 		pr_err("System table signature incorrect!\n");
773 		return -EINVAL;
774 	}
775 
776 	return 0;
777 }
778 
779 #ifndef CONFIG_IA64
780 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
781 						size_t size)
782 {
783 	const efi_char16_t *ret;
784 
785 	ret = early_memremap_ro(fw_vendor, size);
786 	if (!ret)
787 		pr_err("Could not map the firmware vendor!\n");
788 	return ret;
789 }
790 
791 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
792 {
793 	early_memunmap((void *)fw_vendor, size);
794 }
795 #else
796 #define map_fw_vendor(p, s)	__va(p)
797 #define unmap_fw_vendor(v, s)
798 #endif
799 
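/*
 * Report the EFI revision and firmware vendor from the system table header.
 * On 64-bit x86 Apple firmware that claims a revision newer than EFI 1.10,
 * restrict the runtime services to the EFI 1.10 set.
 */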
800 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
801 				     unsigned long fw_vendor)
802 {
803 	char vendor[100] = "unknown";
804 	const efi_char16_t *c16;
805 	size_t i;
806 	u16 rev;
807 
808 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
809 	if (c16) {
810 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
811 			vendor[i] = c16[i];
812 		vendor[i] = '\0';
813 
814 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
815 	}
816 
817 	rev = (u16)systab_hdr->revision;
818 	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
819 
820 	rev %= 10;
821 	if (rev)
822 		pr_cont(".%u", rev);
823 
824 	pr_cont(" by %s\n", vendor);
825 
826 	if (IS_ENABLED(CONFIG_X86_64) &&
827 	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
828 	    !strcmp(vendor, "Apple")) {
829 		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
830 		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
831 	}
832 }
833 
834 static __initdata char memory_type_name[][13] = {
835 	"Reserved",
836 	"Loader Code",
837 	"Loader Data",
838 	"Boot Code",
839 	"Boot Data",
840 	"Runtime Code",
841 	"Runtime Data",
842 	"Conventional",
843 	"Unusable",
844 	"ACPI Reclaim",
845 	"ACPI Mem NVS",
846 	"MMIO",
847 	"MMIO Port",
848 	"PAL Code",
849 	"Persistent",
850 	"Unaccepted",
851 };
852 
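/*
 * Format an EFI memory descriptor's type and attributes into a human-readable
 * string in @buf (at most @size bytes).  Unrecognised attribute bits cause the
 * raw attribute value to be printed instead of the decoded flag list.
 */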
853 char * __init efi_md_typeattr_format(char *buf, size_t size,
854 				     const efi_memory_desc_t *md)
855 {
856 	char *pos;
857 	int type_len;
858 	u64 attr;
859 
860 	pos = buf;
861 	if (md->type >= ARRAY_SIZE(memory_type_name))
862 		type_len = snprintf(pos, size, "[type=%u", md->type);
863 	else
864 		type_len = snprintf(pos, size, "[%-*s",
865 				    (int)(sizeof(memory_type_name[0]) - 1),
866 				    memory_type_name[md->type]);
867 	if (type_len >= size)
868 		return buf;
869 
870 	pos += type_len;
871 	size -= type_len;
872 
873 	attr = md->attribute;
874 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
875 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
876 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
877 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
878 		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
879 		snprintf(pos, size, "|attr=0x%016llx]",
880 			 (unsigned long long)attr);
881 	else
882 		snprintf(pos, size,
883 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
884 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
885 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
886 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
887 			 attr & EFI_MEMORY_SP			? "SP"  : "",
888 			 attr & EFI_MEMORY_NV			? "NV"  : "",
889 			 attr & EFI_MEMORY_XP			? "XP"  : "",
890 			 attr & EFI_MEMORY_RP			? "RP"  : "",
891 			 attr & EFI_MEMORY_WP			? "WP"  : "",
892 			 attr & EFI_MEMORY_RO			? "RO"  : "",
893 			 attr & EFI_MEMORY_UCE			? "UCE" : "",
894 			 attr & EFI_MEMORY_WB			? "WB"  : "",
895 			 attr & EFI_MEMORY_WT			? "WT"  : "",
896 			 attr & EFI_MEMORY_WC			? "WC"  : "",
897 			 attr & EFI_MEMORY_UC			? "UC"  : "");
898 	return buf;
899 }
900 
901 /*
902  * IA64 has a funky EFI memory map that doesn't work the same way as
903  * other architectures.
904  */
905 #ifndef CONFIG_IA64
906 /*
907  * efi_mem_attributes - lookup memmap attributes for physical address
908  * @phys_addr: the physical address to lookup
909  *
910  * Search in the EFI memory map for the region covering
911  * @phys_addr. Returns the EFI memory attributes if the region
912  * was found in the memory map, 0 otherwise.
913  */
914 u64 efi_mem_attributes(unsigned long phys_addr)
915 {
916 	efi_memory_desc_t *md;
917 
918 	if (!efi_enabled(EFI_MEMMAP))
919 		return 0;
920 
921 	for_each_efi_memory_desc(md) {
922 		if ((md->phys_addr <= phys_addr) &&
923 		    (phys_addr < (md->phys_addr +
924 		    (md->num_pages << EFI_PAGE_SHIFT))))
925 			return md->attribute;
926 	}
927 	return 0;
928 }
929 
930 /*
931  * efi_mem_type - lookup memmap type for physical address
932  * @phys_addr: the physical address to lookup
933  *
934  * Search in the EFI memory map for the region covering @phys_addr.
935  * Returns the EFI memory type if the region was found in the memory
936  * map, -EINVAL otherwise.
937  */
938 int efi_mem_type(unsigned long phys_addr)
939 {
940 	const efi_memory_desc_t *md;
941 
942 	if (!efi_enabled(EFI_MEMMAP))
943 		return -ENOTSUPP;
944 
945 	for_each_efi_memory_desc(md) {
946 		if ((md->phys_addr <= phys_addr) &&
947 		    (phys_addr < (md->phys_addr +
948 				  (md->num_pages << EFI_PAGE_SHIFT))))
949 			return md->type;
950 	}
951 	return -EINVAL;
952 }
953 #endif
954 
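/*
 * Translate an EFI status code into a negative errno value
 * (0 for EFI_SUCCESS, -EINVAL for anything unrecognised).
 */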
955 int efi_status_to_err(efi_status_t status)
956 {
957 	int err;
958 
959 	switch (status) {
960 	case EFI_SUCCESS:
961 		err = 0;
962 		break;
963 	case EFI_INVALID_PARAMETER:
964 		err = -EINVAL;
965 		break;
966 	case EFI_OUT_OF_RESOURCES:
967 		err = -ENOSPC;
968 		break;
969 	case EFI_DEVICE_ERROR:
970 		err = -EIO;
971 		break;
972 	case EFI_WRITE_PROTECTED:
973 		err = -EROFS;
974 		break;
975 	case EFI_SECURITY_VIOLATION:
976 		err = -EACCES;
977 		break;
978 	case EFI_NOT_FOUND:
979 		err = -ENOENT;
980 		break;
981 	case EFI_ABORTED:
982 		err = -EINTR;
983 		break;
984 	default:
985 		err = -EINVAL;
986 	}
987 
988 	return err;
989 }
990 EXPORT_SYMBOL_GPL(efi_status_to_err);
991 
992 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
993 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
994 
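/*
 * Map the head of the memreserve linked list provided via the
 * LINUX_EFI_MEMRESERVE configuration table; returns -ENODEV if the table
 * is absent.
 */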
995 static int __init efi_memreserve_map_root(void)
996 {
997 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
998 		return -ENODEV;
999 
1000 	efi_memreserve_root = memremap(mem_reserve,
1001 				       sizeof(*efi_memreserve_root),
1002 				       MEMREMAP_WB);
1003 	if (WARN_ON_ONCE(!efi_memreserve_root))
1004 		return -ENOMEM;
1005 	return 0;
1006 }
1007 
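/*
 * Register the range as a "reserved" IORESOURCE_MEM resource (expected to
 * nest inside a "System RAM" region) and, on architectures that keep memblock
 * around, mirror the reservation into memblock as well.
 */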
1008 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
1009 {
1010 	struct resource *res, *parent;
1011 	int ret;
1012 
1013 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1014 	if (!res)
1015 		return -ENOMEM;
1016 
1017 	res->name	= "reserved";
1018 	res->flags	= IORESOURCE_MEM;
1019 	res->start	= addr;
1020 	res->end	= addr + size - 1;
1021 
1022 	/* we expect a conflict with a 'System RAM' region */
1023 	parent = request_resource_conflict(&iomem_resource, res);
1024 	ret = parent ? request_resource(parent, res) : 0;
1025 
1026 	/*
1027 	 * Given that efi_mem_reserve_iomem() can be called at any
1028 	 * time, only call memblock_reserve() if the architecture
1029 	 * keeps the infrastructure around.
1030 	 */
1031 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
1032 		memblock_reserve(addr, size);
1033 
1034 	return ret;
1035 }
1036 
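/*
 * Record a reservation in the memreserve table so it survives kexec: reuse a
 * free slot in an existing list entry if one is available, otherwise allocate
 * a fresh page-sized entry and link it at the head of the list.
 */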
1037 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1038 {
1039 	struct linux_efi_memreserve *rsv;
1040 	unsigned long prsv;
1041 	int rc, index;
1042 
1043 	if (efi_memreserve_root == (void *)ULONG_MAX)
1044 		return -ENODEV;
1045 
1046 	if (!efi_memreserve_root) {
1047 		rc = efi_memreserve_map_root();
1048 		if (rc)
1049 			return rc;
1050 	}
1051 
1052 	/* first try to find a slot in an existing linked list entry */
1053 	for (prsv = efi_memreserve_root->next; prsv; ) {
1054 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1055 		if (!rsv)
1056 			return -ENOMEM;
1057 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1058 		if (index < rsv->size) {
1059 			rsv->entry[index].base = addr;
1060 			rsv->entry[index].size = size;
1061 
1062 			memunmap(rsv);
1063 			return efi_mem_reserve_iomem(addr, size);
1064 		}
1065 		prsv = rsv->next;
1066 		memunmap(rsv);
1067 	}
1068 
1069 	/* no slot found - allocate a new linked list entry */
1070 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
1071 	if (!rsv)
1072 		return -ENOMEM;
1073 
1074 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1075 	if (rc) {
1076 		free_page((unsigned long)rsv);
1077 		return rc;
1078 	}
1079 
1080 	/*
1081 	 * The memremap() call above assumes that a linux_efi_memreserve entry
1082 	 * never crosses a page boundary, so let's ensure that this remains true
1083 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1084 	 * using SZ_4K explicitly in the size calculation below.
1085 	 */
1086 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1087 	atomic_set(&rsv->count, 1);
1088 	rsv->entry[0].base = addr;
1089 	rsv->entry[0].size = size;
1090 
1091 	spin_lock(&efi_mem_reserve_persistent_lock);
1092 	rsv->next = efi_memreserve_root->next;
1093 	efi_memreserve_root->next = __pa(rsv);
1094 	spin_unlock(&efi_mem_reserve_persistent_lock);
1095 
1096 	return efi_mem_reserve_iomem(addr, size);
1097 }
1098 
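/*
 * Map the memreserve list head at early initcall time; if the table is
 * absent, poison the root pointer so efi_mem_reserve_persistent() returns
 * -ENODEV instead of retrying.
 */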
1099 static int __init efi_memreserve_root_init(void)
1100 {
1101 	if (efi_memreserve_root)
1102 		return 0;
1103 	if (efi_memreserve_map_root())
1104 		efi_memreserve_root = (void *)ULONG_MAX;
1105 	return 0;
1106 }
1107 early_initcall(efi_memreserve_root_init);
1108 
1109 #ifdef CONFIG_KEXEC
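/*
 * On kexec, refresh the RNG seed table with fresh entropy so that the next
 * kernel boots with a fresh seed rather than the one this kernel has already
 * consumed (and zeroed).
 */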
1110 static int update_efi_random_seed(struct notifier_block *nb,
1111 				  unsigned long code, void *unused)
1112 {
1113 	struct linux_efi_random_seed *seed;
1114 	u32 size = 0;
1115 
1116 	if (!kexec_in_progress)
1117 		return NOTIFY_DONE;
1118 
1119 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1120 	if (seed != NULL) {
1121 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1122 		memunmap(seed);
1123 	} else {
1124 		pr_err("Could not map UEFI random seed!\n");
1125 	}
1126 	if (size > 0) {
1127 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1128 				MEMREMAP_WB);
1129 		if (seed != NULL) {
1130 			seed->size = size;
1131 			get_random_bytes(seed->bits, seed->size);
1132 			memunmap(seed);
1133 		} else {
1134 			pr_err("Could not map UEFI random seed!\n");
1135 		}
1136 	}
1137 	return NOTIFY_DONE;
1138 }
1139 
1140 static struct notifier_block efi_random_seed_nb = {
1141 	.notifier_call = update_efi_random_seed,
1142 };
1143 
1144 static int __init register_update_efi_random_seed(void)
1145 {
1146 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1147 		return 0;
1148 	return register_reboot_notifier(&efi_random_seed_nb);
1149 }
1150 late_initcall(register_update_efi_random_seed);
1151 #endif
1152