xref: /openbmc/linux/drivers/firmware/efi/efi.c (revision b285d2ae)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/io.h>
25 #include <linux/kexec.h>
26 #include <linux/platform_device.h>
27 #include <linux/random.h>
28 #include <linux/reboot.h>
29 #include <linux/slab.h>
30 #include <linux/acpi.h>
31 #include <linux/ucs2_string.h>
32 #include <linux/memblock.h>
33 #include <linux/security.h>
34 
35 #include <asm/early_ioremap.h>
36 
37 struct efi __read_mostly efi = {
38 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
39 	.acpi			= EFI_INVALID_TABLE_ADDR,
40 	.acpi20			= EFI_INVALID_TABLE_ADDR,
41 	.smbios			= EFI_INVALID_TABLE_ADDR,
42 	.smbios3		= EFI_INVALID_TABLE_ADDR,
43 	.esrt			= EFI_INVALID_TABLE_ADDR,
44 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
45 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
46 };
47 EXPORT_SYMBOL(efi);
48 
49 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
50 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
51 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
52 
53 struct mm_struct efi_mm = {
54 	.mm_rb			= RB_ROOT,
55 	.mm_users		= ATOMIC_INIT(2),
56 	.mm_count		= ATOMIC_INIT(1),
57 	MMAP_LOCK_INITIALIZER(efi_mm)
58 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
59 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
60 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
61 };
62 
63 struct workqueue_struct *efi_rts_wq;
64 
65 static bool disable_runtime;
66 static int __init setup_noefi(char *arg)
67 {
68 	disable_runtime = true;
69 	return 0;
70 }
71 early_param("noefi", setup_noefi);
72 
73 bool efi_runtime_disabled(void)
74 {
75 	return disable_runtime;
76 }
77 
78 bool __pure __efi_soft_reserve_enabled(void)
79 {
80 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
81 }
82 
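/* Parse the "efi=" option: "debug", "noruntime" and "nosoftreserve" are handled. */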
83 static int __init parse_efi_cmdline(char *str)
84 {
85 	if (!str) {
86 		pr_warn("need at least one option\n");
87 		return -EINVAL;
88 	}
89 
90 	if (parse_option_str(str, "debug"))
91 		set_bit(EFI_DBG, &efi.flags);
92 
93 	if (parse_option_str(str, "noruntime"))
94 		disable_runtime = true;
95 
96 	if (parse_option_str(str, "nosoftreserve"))
97 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
98 
99 	return 0;
100 }
101 early_param("efi", parse_efi_cmdline);
102 
103 struct kobject *efi_kobj;
104 
105 /*
106  * Let's not leave out systab information that snuck into
107  * the efivars driver.
108  * Note: do not add more fields to the systab sysfs file, as that breaks
109  * the sysfs one-value-per-file rule!
110  */
111 static ssize_t systab_show(struct kobject *kobj,
112 			   struct kobj_attribute *attr, char *buf)
113 {
114 	char *str = buf;
115 
116 	if (!kobj || !buf)
117 		return -EINVAL;
118 
119 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
120 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
121 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
122 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
123 	/*
124 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
125 	 * SMBIOS3 entry point shall be preferred, so we list it first to
126 	 * let applications stop parsing after the first match.
127 	 */
128 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
129 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
130 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
131 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
132 
133 	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
134 		str = efi_systab_show_arch(str);
135 
136 	return str - buf;
137 }
138 
139 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
140 
141 static ssize_t fw_platform_size_show(struct kobject *kobj,
142 				     struct kobj_attribute *attr, char *buf)
143 {
144 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
145 }
146 
147 extern __weak struct kobj_attribute efi_attr_fw_vendor;
148 extern __weak struct kobj_attribute efi_attr_runtime;
149 extern __weak struct kobj_attribute efi_attr_config_table;
150 static struct kobj_attribute efi_attr_fw_platform_size =
151 	__ATTR_RO(fw_platform_size);
152 
153 static struct attribute *efi_subsys_attrs[] = {
154 	&efi_attr_systab.attr,
155 	&efi_attr_fw_platform_size.attr,
156 	&efi_attr_fw_vendor.attr,
157 	&efi_attr_runtime.attr,
158 	&efi_attr_config_table.attr,
159 	NULL,
160 };
161 
162 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
163 				   int n)
164 {
165 	return attr->mode;
166 }
167 
168 static const struct attribute_group efi_subsys_attr_group = {
169 	.attrs = efi_subsys_attrs,
170 	.is_visible = efi_attr_is_visible,
171 };
172 
173 static struct efivars generic_efivars;
174 static struct efivar_operations generic_ops;
175 
176 static int generic_ops_register(void)
177 {
178 	generic_ops.get_variable = efi.get_variable;
179 	generic_ops.get_next_variable = efi.get_next_variable;
180 	generic_ops.query_variable_store = efi_query_variable_store;
181 
182 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
183 		generic_ops.set_variable = efi.set_variable;
184 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
185 	}
186 	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
187 }
188 
189 static void generic_ops_unregister(void)
190 {
191 	efivars_unregister(&generic_efivars);
192 }
193 
194 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
195 #define EFIVAR_SSDT_NAME_MAX	16
196 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
197 static int __init efivar_ssdt_setup(char *str)
198 {
199 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
200 
201 	if (ret)
202 		return ret;
203 
204 	if (strlen(str) < sizeof(efivar_ssdt))
205 		memcpy(efivar_ssdt, str, strlen(str));
206 	else
207 		pr_warn("efivar_ssdt: name too long: %s\n", str);
208 	return 0;
209 }
210 __setup("efivar_ssdt=", efivar_ssdt_setup);
211 
212 static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
213 				   unsigned long name_size, void *data)
214 {
215 	struct efivar_entry *entry;
216 	struct list_head *list = data;
217 	char utf8_name[EFIVAR_SSDT_NAME_MAX];
218 	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
219 
220 	ucs2_as_utf8(utf8_name, name, limit - 1);
221 	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
222 		return 0;
223 
224 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
225 	if (!entry)
226 		return 0;
227 
228 	memcpy(entry->var.VariableName, name, name_size);
229 	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
230 
231 	efivar_entry_add(entry, list);
232 
233 	return 0;
234 }
235 
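/*
 * Load ACPI SSDT overlays from EFI variables whose name matches the one
 * given via the efivar_ssdt= command line parameter, passing each table
 * to acpi_load_table().
 */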
236 static __init int efivar_ssdt_load(void)
237 {
238 	LIST_HEAD(entries);
239 	struct efivar_entry *entry, *aux;
240 	unsigned long size;
241 	void *data;
242 	int ret;
243 
244 	if (!efivar_ssdt[0])
245 		return 0;
246 
247 	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
248 
249 	list_for_each_entry_safe(entry, aux, &entries, list) {
250 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
251 			&entry->var.VendorGuid);
252 
253 		list_del(&entry->list);
254 
255 		ret = efivar_entry_size(entry, &size);
256 		if (ret) {
257 			pr_err("failed to get var size\n");
258 			goto free_entry;
259 		}
260 
261 		data = kmalloc(size, GFP_KERNEL);
262 		if (!data) {
263 			ret = -ENOMEM;
264 			goto free_entry;
265 		}
266 
267 		ret = efivar_entry_get(entry, NULL, &size, data);
268 		if (ret) {
269 			pr_err("failed to get var data\n");
270 			goto free_data;
271 		}
272 
273 		ret = acpi_load_table(data, NULL);
274 		if (ret) {
275 			pr_err("failed to load table: %d\n", ret);
276 			goto free_data;
277 		}
278 
279 		goto free_entry;
280 
281 free_data:
282 		kfree(data);
283 
284 free_entry:
285 		kfree(entry);
286 	}
287 
288 	return ret;
289 }
290 #else
291 static inline int efivar_ssdt_load(void) { return 0; }
292 #endif
293 
294 #ifdef CONFIG_DEBUG_FS
295 
296 #define EFI_DEBUGFS_MAX_BLOBS 32
297 
298 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
299 
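/*
 * Expose the EFI boot services code and data regions as read-only blobs
 * in the "efi" debugfs directory so their contents can be inspected.
 */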
300 static void __init efi_debugfs_init(void)
301 {
302 	struct dentry *efi_debugfs;
303 	efi_memory_desc_t *md;
304 	char name[32];
305 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
306 	int i = 0;
307 
308 	efi_debugfs = debugfs_create_dir("efi", NULL);
309 	if (IS_ERR_OR_NULL(efi_debugfs))
310 		return;
311 
312 	for_each_efi_memory_desc(md) {
313 		switch (md->type) {
314 		case EFI_BOOT_SERVICES_CODE:
315 			snprintf(name, sizeof(name), "boot_services_code%d",
316 				 type_count[md->type]++);
317 			break;
318 		case EFI_BOOT_SERVICES_DATA:
319 			snprintf(name, sizeof(name), "boot_services_data%d",
320 				 type_count[md->type]++);
321 			break;
322 		default:
323 			continue;
324 		}
325 
326 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
327 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
328 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
329 			break;
330 		}
331 
332 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
333 		debugfs_blob[i].data = memremap(md->phys_addr,
334 						debugfs_blob[i].size,
335 						MEMREMAP_WB);
336 		if (!debugfs_blob[i].data)
337 			continue;
338 
339 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
340 		i++;
341 	}
342 }
343 #else
344 static inline void efi_debugfs_init(void) {}
345 #endif
346 
347 /*
348  * We register the efi subsystem with the firmware subsystem and the
349  * efivars subsystem with the efi subsystem, if the system was booted with
350  * EFI.
351  */
352 static int __init efisubsys_init(void)
353 {
354 	int error;
355 
356 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
357 		efi.runtime_supported_mask = 0;
358 
359 	if (!efi_enabled(EFI_BOOT))
360 		return 0;
361 
362 	if (efi.runtime_supported_mask) {
363 		/*
364 		 * Since we process only one efi_runtime_service() at a time, an
365 		 * ordered workqueue (which creates only one execution context)
366 		 * should suffice for all our needs.
367 		 */
368 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
369 		if (!efi_rts_wq) {
370 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
371 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
372 			efi.runtime_supported_mask = 0;
373 			return 0;
374 		}
375 	}
376 
377 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
378 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
379 
380 	/* We register the efi directory at /sys/firmware/efi */
381 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
382 	if (!efi_kobj) {
383 		pr_err("efi: Firmware registration failed.\n");
384 		return -ENOMEM;
385 	}
386 
387 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
388 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
389 		efivar_ssdt_load();
390 		error = generic_ops_register();
391 		if (error)
392 			goto err_put;
393 		platform_device_register_simple("efivars", 0, NULL, 0);
394 	}
395 
396 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
397 	if (error) {
398 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
399 		       error);
400 		goto err_unregister;
401 	}
402 
403 	error = efi_runtime_map_init(efi_kobj);
404 	if (error)
405 		goto err_remove_group;
406 
407 	/* and the standard mountpoint for efivarfs */
408 	error = sysfs_create_mount_point(efi_kobj, "efivars");
409 	if (error) {
410 		pr_err("efivars: Subsystem registration failed.\n");
411 		goto err_remove_group;
412 	}
413 
414 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
415 		efi_debugfs_init();
416 
417 	return 0;
418 
419 err_remove_group:
420 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
421 err_unregister:
422 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
423 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
424 		generic_ops_unregister();
425 err_put:
426 	kobject_put(efi_kobj);
427 	return error;
428 }
429 
430 subsys_initcall(efisubsys_init);
431 
432 /*
433  * Find the efi memory descriptor for a given physical address.  If the
434  * address lies within an EFI Memory Map entry, populate the supplied
435  * memory descriptor with that entry's data and return 0; otherwise
436  * return -ENOENT.
437  */
438 int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
439 {
440 	efi_memory_desc_t *md;
441 
442 	if (!efi_enabled(EFI_MEMMAP)) {
443 		pr_err_once("EFI_MEMMAP is not enabled.\n");
444 		return -EINVAL;
445 	}
446 
447 	if (!out_md) {
448 		pr_err_once("out_md is null.\n");
449 		return -EINVAL;
450 	}
451 
452 	for_each_efi_memory_desc(md) {
453 		u64 size;
454 		u64 end;
455 
456 		size = md->num_pages << EFI_PAGE_SHIFT;
457 		end = md->phys_addr + size;
458 		if (phys_addr >= md->phys_addr && phys_addr < end) {
459 			memcpy(out_md, md, sizeof(*out_md));
460 			return 0;
461 		}
462 	}
463 	return -ENOENT;
464 }
465 
466 /*
467  * Calculate the highest address of an efi memory descriptor.
468  */
469 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
470 {
471 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
472 	u64 end = md->phys_addr + size;
473 	return end;
474 }
475 
476 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
477 
478 /**
479  * efi_mem_reserve - Reserve an EFI memory region
480  * @addr: Physical address to reserve
481  * @size: Size of reservation
482  *
483  * Mark a region as reserved from general kernel allocation and
484  * prevent it from being released by efi_free_boot_services().
485  *
486  * This function should be called by drivers once they've parsed EFI
487  * configuration tables to figure out where their data lives, e.g.
488  * efi_esrt_init().
489  */
490 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
491 {
492 	if (!memblock_is_region_reserved(addr, size))
493 		memblock_reserve(addr, size);
494 
495 	/*
496 	 * Some architectures (x86) reserve all boot services ranges
497 	 * until efi_free_boot_services() because of buggy firmware
498 	 * implementations. This means the above memblock_reserve() is
499 	 * superfluous on x86; what it needs to do instead is ensure
500 	 * that the @addr, @size range is not freed.
501 	 */
502 	efi_arch_mem_reserve(addr, size);
503 }
504 
505 static const efi_config_table_type_t common_tables[] __initconst = {
506 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
507 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
508 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
509 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
510 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
511 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
512 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
513 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
514 	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
515 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
516 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
517 #ifdef CONFIG_EFI_RCI2_TABLE
518 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
519 #endif
520 	{},
521 };
522 
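/*
 * Record the address of a configuration table whose GUID matches one of
 * the entries in @table_types.  Returns 1 on a match, 0 otherwise.
 */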
523 static __init int match_config_table(const efi_guid_t *guid,
524 				     unsigned long table,
525 				     const efi_config_table_type_t *table_types)
526 {
527 	int i;
528 
529 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
530 		if (!efi_guidcmp(*guid, table_types[i].guid)) {
531 			*(table_types[i].ptr) = table;
532 			if (table_types[i].name[0])
533 				pr_cont("%s=0x%lx ",
534 					table_types[i].name, table);
535 			return 1;
536 		}
537 	}
538 
539 	return 0;
540 }
541 
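/*
 * Scan the firmware-provided array of configuration tables, recording the
 * addresses of the ones we know about, then process the Linux-specific
 * tables: the RNG seed, the memory attributes and TPM event log tables,
 * the MEMRESERVE list and the RT properties table.
 */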
542 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
543 				   int count,
544 				   const efi_config_table_type_t *arch_tables)
545 {
546 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
547 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
548 	const efi_guid_t *guid;
549 	unsigned long table;
550 	int i;
551 
552 	pr_info("");
553 	for (i = 0; i < count; i++) {
554 		if (!IS_ENABLED(CONFIG_X86)) {
555 			guid = &config_tables[i].guid;
556 			table = (unsigned long)config_tables[i].table;
557 		} else if (efi_enabled(EFI_64BIT)) {
558 			guid = &tbl64[i].guid;
559 			table = tbl64[i].table;
560 
561 			if (IS_ENABLED(CONFIG_X86_32) &&
562 			    tbl64[i].table > U32_MAX) {
563 				pr_cont("\n");
564 				pr_err("Table located above 4GB, disabling EFI.\n");
565 				return -EINVAL;
566 			}
567 		} else {
568 			guid = &tbl32[i].guid;
569 			table = tbl32[i].table;
570 		}
571 
572 		if (!match_config_table(guid, table, common_tables) && arch_tables)
573 			match_config_table(guid, table, arch_tables);
574 	}
575 	pr_cont("\n");
576 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
577 
578 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
579 		struct linux_efi_random_seed *seed;
580 		u32 size = 0;
581 
582 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
583 		if (seed != NULL) {
584 			size = READ_ONCE(seed->size);
585 			early_memunmap(seed, sizeof(*seed));
586 		} else {
587 			pr_err("Could not map UEFI random seed!\n");
588 		}
589 		if (size > 0) {
590 			seed = early_memremap(efi_rng_seed,
591 					      sizeof(*seed) + size);
592 			if (seed != NULL) {
593 				pr_notice("seeding entropy pool\n");
594 				add_bootloader_randomness(seed->bits, size);
595 				early_memunmap(seed, sizeof(*seed) + size);
596 			} else {
597 				pr_err("Could not map UEFI random seed!\n");
598 			}
599 		}
600 	}
601 
602 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
603 		efi_memattr_init();
604 
605 	efi_tpm_eventlog_init();
606 
607 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
608 		unsigned long prsv = mem_reserve;
609 
610 		while (prsv) {
611 			struct linux_efi_memreserve *rsv;
612 			u8 *p;
613 
614 			/*
615 			 * Just map a full page: that is what we will get
616 			 * anyway, and it permits us to map the entire entry
617 			 * before knowing its size.
618 			 */
619 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
620 					   PAGE_SIZE);
621 			if (p == NULL) {
622 				pr_err("Could not map UEFI memreserve entry!\n");
623 				return -ENOMEM;
624 			}
625 
626 			rsv = (void *)(p + prsv % PAGE_SIZE);
627 
628 			/* reserve the entry itself */
629 			memblock_reserve(prsv,
630 					 struct_size(rsv, entry, rsv->size));
631 
632 			for (i = 0; i < atomic_read(&rsv->count); i++) {
633 				memblock_reserve(rsv->entry[i].base,
634 						 rsv->entry[i].size);
635 			}
636 
637 			prsv = rsv->next;
638 			early_memunmap(p, PAGE_SIZE);
639 		}
640 	}
641 
642 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
643 		efi_rt_properties_table_t *tbl;
644 
645 		tbl = early_memremap(rt_prop, sizeof(*tbl));
646 		if (tbl) {
647 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
648 			early_memunmap(tbl, sizeof(*tbl));
649 		}
650 	}
651 
652 	return 0;
653 }
654 
655 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
656 				   int min_major_version)
657 {
658 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
659 		pr_err("System table signature incorrect!\n");
660 		return -EINVAL;
661 	}
662 
663 	if ((systab_hdr->revision >> 16) < min_major_version)
664 		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
665 		       systab_hdr->revision >> 16,
666 		       systab_hdr->revision & 0xffff,
667 		       min_major_version);
668 
669 	return 0;
670 }
671 
672 #ifndef CONFIG_IA64
673 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
674 						size_t size)
675 {
676 	const efi_char16_t *ret;
677 
678 	ret = early_memremap_ro(fw_vendor, size);
679 	if (!ret)
680 		pr_err("Could not map the firmware vendor!\n");
681 	return ret;
682 }
683 
684 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
685 {
686 	early_memunmap((void *)fw_vendor, size);
687 }
688 #else
689 #define map_fw_vendor(p, s)	__va(p)
690 #define unmap_fw_vendor(v, s)
691 #endif
692 
693 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
694 				     unsigned long fw_vendor)
695 {
696 	char vendor[100] = "unknown";
697 	const efi_char16_t *c16;
698 	size_t i;
699 
700 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
701 	if (c16) {
702 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
703 			vendor[i] = c16[i];
704 		vendor[i] = '\0';
705 
706 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
707 	}
708 
709 	pr_info("EFI v%u.%.02u by %s\n",
710 		systab_hdr->revision >> 16,
711 		systab_hdr->revision & 0xffff,
712 		vendor);
713 }
714 
715 static __initdata char memory_type_name[][20] = {
716 	"Reserved",
717 	"Loader Code",
718 	"Loader Data",
719 	"Boot Code",
720 	"Boot Data",
721 	"Runtime Code",
722 	"Runtime Data",
723 	"Conventional Memory",
724 	"Unusable Memory",
725 	"ACPI Reclaim Memory",
726 	"ACPI Memory NVS",
727 	"Memory Mapped I/O",
728 	"MMIO Port Space",
729 	"PAL Code",
730 	"Persistent Memory",
731 };
732 
733 char * __init efi_md_typeattr_format(char *buf, size_t size,
734 				     const efi_memory_desc_t *md)
735 {
736 	char *pos;
737 	int type_len;
738 	u64 attr;
739 
740 	pos = buf;
741 	if (md->type >= ARRAY_SIZE(memory_type_name))
742 		type_len = snprintf(pos, size, "[type=%u", md->type);
743 	else
744 		type_len = snprintf(pos, size, "[%-*s",
745 				    (int)(sizeof(memory_type_name[0]) - 1),
746 				    memory_type_name[md->type]);
747 	if (type_len >= size)
748 		return buf;
749 
750 	pos += type_len;
751 	size -= type_len;
752 
753 	attr = md->attribute;
754 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
755 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
756 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
757 		     EFI_MEMORY_NV | EFI_MEMORY_SP |
758 		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
759 		snprintf(pos, size, "|attr=0x%016llx]",
760 			 (unsigned long long)attr);
761 	else
762 		snprintf(pos, size,
763 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
764 			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
765 			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
766 			 attr & EFI_MEMORY_SP      ? "SP"  : "",
767 			 attr & EFI_MEMORY_NV      ? "NV"  : "",
768 			 attr & EFI_MEMORY_XP      ? "XP"  : "",
769 			 attr & EFI_MEMORY_RP      ? "RP"  : "",
770 			 attr & EFI_MEMORY_WP      ? "WP"  : "",
771 			 attr & EFI_MEMORY_RO      ? "RO"  : "",
772 			 attr & EFI_MEMORY_UCE     ? "UCE" : "",
773 			 attr & EFI_MEMORY_WB      ? "WB"  : "",
774 			 attr & EFI_MEMORY_WT      ? "WT"  : "",
775 			 attr & EFI_MEMORY_WC      ? "WC"  : "",
776 			 attr & EFI_MEMORY_UC      ? "UC"  : "");
777 	return buf;
778 }
779 
780 /*
781  * IA64 has a funky EFI memory map that doesn't work the same way as
782  * other architectures.
783  */
784 #ifndef CONFIG_IA64
785 /*
786  * efi_mem_attributes - lookup memmap attributes for physical address
787  * @phys_addr: the physical address to lookup
788  *
789  * Search in the EFI memory map for the region covering
790  * @phys_addr. Returns the EFI memory attributes if the region
791  * was found in the memory map, 0 otherwise.
792  */
793 u64 efi_mem_attributes(unsigned long phys_addr)
794 {
795 	efi_memory_desc_t *md;
796 
797 	if (!efi_enabled(EFI_MEMMAP))
798 		return 0;
799 
800 	for_each_efi_memory_desc(md) {
801 		if ((md->phys_addr <= phys_addr) &&
802 		    (phys_addr < (md->phys_addr +
803 		    (md->num_pages << EFI_PAGE_SHIFT))))
804 			return md->attribute;
805 	}
806 	return 0;
807 }
808 
809 /*
810  * efi_mem_type - lookup memmap type for physical address
811  * @phys_addr: the physical address to lookup
812  *
813  * Search in the EFI memory map for the region covering @phys_addr.
814  * Returns the EFI memory type if the region was found in the memory
815  * map, -EINVAL otherwise.
816  */
817 int efi_mem_type(unsigned long phys_addr)
818 {
819 	const efi_memory_desc_t *md;
820 
821 	if (!efi_enabled(EFI_MEMMAP))
822 		return -ENOTSUPP;
823 
824 	for_each_efi_memory_desc(md) {
825 		if ((md->phys_addr <= phys_addr) &&
826 		    (phys_addr < (md->phys_addr +
827 				  (md->num_pages << EFI_PAGE_SHIFT))))
828 			return md->type;
829 	}
830 	return -EINVAL;
831 }
832 #endif
833 
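/*
 * Convert an EFI status code into a negative errno value; any status not
 * handled explicitly maps to -EINVAL.
 */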
834 int efi_status_to_err(efi_status_t status)
835 {
836 	int err;
837 
838 	switch (status) {
839 	case EFI_SUCCESS:
840 		err = 0;
841 		break;
842 	case EFI_INVALID_PARAMETER:
843 		err = -EINVAL;
844 		break;
845 	case EFI_OUT_OF_RESOURCES:
846 		err = -ENOSPC;
847 		break;
848 	case EFI_DEVICE_ERROR:
849 		err = -EIO;
850 		break;
851 	case EFI_WRITE_PROTECTED:
852 		err = -EROFS;
853 		break;
854 	case EFI_SECURITY_VIOLATION:
855 		err = -EACCES;
856 		break;
857 	case EFI_NOT_FOUND:
858 		err = -ENOENT;
859 		break;
860 	case EFI_ABORTED:
861 		err = -EINTR;
862 		break;
863 	default:
864 		err = -EINVAL;
865 	}
866 
867 	return err;
868 }
869 
870 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
871 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
872 
873 static int __init efi_memreserve_map_root(void)
874 {
875 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
876 		return -ENODEV;
877 
878 	efi_memreserve_root = memremap(mem_reserve,
879 				       sizeof(*efi_memreserve_root),
880 				       MEMREMAP_WB);
881 	if (WARN_ON_ONCE(!efi_memreserve_root))
882 		return -ENOMEM;
883 	return 0;
884 }
885 
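/*
 * Insert a "reserved" resource for the range into the iomem resource
 * tree, nested under the conflicting "System RAM" region when one exists.
 */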
886 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
887 {
888 	struct resource *res, *parent;
889 
890 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
891 	if (!res)
892 		return -ENOMEM;
893 
894 	res->name	= "reserved";
895 	res->flags	= IORESOURCE_MEM;
896 	res->start	= addr;
897 	res->end	= addr + size - 1;
898 
899 	/* we expect a conflict with a 'System RAM' region */
900 	parent = request_resource_conflict(&iomem_resource, res);
901 	return parent ? request_resource(parent, res) : 0;
902 }
903 
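/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE linked list so that it
 * is reapplied by a subsequent kernel that parses the MEMRESERVE config
 * table (see efi_config_parse_tables()): reuse a free slot in an existing
 * entry if possible, otherwise allocate and link a new page-sized entry.
 */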
904 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
905 {
906 	struct linux_efi_memreserve *rsv;
907 	unsigned long prsv;
908 	int rc, index;
909 
910 	if (efi_memreserve_root == (void *)ULONG_MAX)
911 		return -ENODEV;
912 
913 	if (!efi_memreserve_root) {
914 		rc = efi_memreserve_map_root();
915 		if (rc)
916 			return rc;
917 	}
918 
919 	/* first try to find a slot in an existing linked list entry */
920 	for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
921 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
922 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
923 		if (index < rsv->size) {
924 			rsv->entry[index].base = addr;
925 			rsv->entry[index].size = size;
926 
927 			memunmap(rsv);
928 			return efi_mem_reserve_iomem(addr, size);
929 		}
930 		memunmap(rsv);
931 	}
932 
933 	/* no slot found - allocate a new linked list entry */
934 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
935 	if (!rsv)
936 		return -ENOMEM;
937 
938 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
939 	if (rc) {
940 		free_page((unsigned long)rsv);
941 		return rc;
942 	}
943 
944 	/*
945 	 * The memremap() call above assumes that a linux_efi_memreserve entry
946 	 * never crosses a page boundary, so let's ensure that this remains true
947 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
948 	 * using SZ_4K explicitly in the size calculation below.
949 	 */
950 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
951 	atomic_set(&rsv->count, 1);
952 	rsv->entry[0].base = addr;
953 	rsv->entry[0].size = size;
954 
955 	spin_lock(&efi_mem_reserve_persistent_lock);
956 	rsv->next = efi_memreserve_root->next;
957 	efi_memreserve_root->next = __pa(rsv);
958 	spin_unlock(&efi_mem_reserve_persistent_lock);
959 
960 	return efi_mem_reserve_iomem(addr, size);
961 }
962 
963 static int __init efi_memreserve_root_init(void)
964 {
965 	if (efi_memreserve_root)
966 		return 0;
967 	if (efi_memreserve_map_root())
968 		efi_memreserve_root = (void *)ULONG_MAX;
969 	return 0;
970 }
971 early_initcall(efi_memreserve_root_init);
972 
973 #ifdef CONFIG_KEXEC
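/*
 * On reboot with a kexec in progress, refill the EFI random seed with
 * fresh random bytes so the next kernel boots with new entropy.
 */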
974 static int update_efi_random_seed(struct notifier_block *nb,
975 				  unsigned long code, void *unused)
976 {
977 	struct linux_efi_random_seed *seed;
978 	u32 size = 0;
979 
980 	if (!kexec_in_progress)
981 		return NOTIFY_DONE;
982 
983 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
984 	if (seed != NULL) {
985 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
986 		memunmap(seed);
987 	} else {
988 		pr_err("Could not map UEFI random seed!\n");
989 	}
990 	if (size > 0) {
991 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
992 				MEMREMAP_WB);
993 		if (seed != NULL) {
994 			seed->size = size;
995 			get_random_bytes(seed->bits, seed->size);
996 			memunmap(seed);
997 		} else {
998 			pr_err("Could not map UEFI random seed!\n");
999 		}
1000 	}
1001 	return NOTIFY_DONE;
1002 }
1003 
1004 static struct notifier_block efi_random_seed_nb = {
1005 	.notifier_call = update_efi_random_seed,
1006 };
1007 
1008 static int __init register_update_efi_random_seed(void)
1009 {
1010 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1011 		return 0;
1012 	return register_reboot_notifier(&efi_random_seed_nb);
1013 }
1014 late_initcall(register_update_efi_random_seed);
1015 #endif
1016