xref: /openbmc/linux/drivers/firmware/efi/efi.c (revision 0cd08b10)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/io.h>
25 #include <linux/kexec.h>
26 #include <linux/platform_device.h>
27 #include <linux/random.h>
28 #include <linux/reboot.h>
29 #include <linux/slab.h>
30 #include <linux/acpi.h>
31 #include <linux/ucs2_string.h>
32 #include <linux/memblock.h>
33 #include <linux/security.h>
34 
35 #include <asm/early_ioremap.h>
36 
37 struct efi __read_mostly efi = {
38 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
39 	.acpi			= EFI_INVALID_TABLE_ADDR,
40 	.acpi20			= EFI_INVALID_TABLE_ADDR,
41 	.smbios			= EFI_INVALID_TABLE_ADDR,
42 	.smbios3		= EFI_INVALID_TABLE_ADDR,
43 	.esrt			= EFI_INVALID_TABLE_ADDR,
44 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
45 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
46 };
47 EXPORT_SYMBOL(efi);
48 
49 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
50 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
51 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
52 
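/*
 * efi_mm holds the page tables used to map the EFI runtime services regions
 * on architectures that install them into a dedicated mm (e.g. arm64 and
 * x86-64).
 */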
53 struct mm_struct efi_mm = {
54 	.mm_rb			= RB_ROOT,
55 	.mm_users		= ATOMIC_INIT(2),
56 	.mm_count		= ATOMIC_INIT(1),
57 	MMAP_LOCK_INITIALIZER(efi_mm)
58 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
59 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
60 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
61 };
62 
63 struct workqueue_struct *efi_rts_wq;
64 
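/*
 * "noefi" on the kernel command line disables use of EFI runtime services
 * only; the EFI memory map and configuration tables are still consumed.
 */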
65 static bool disable_runtime;
66 static int __init setup_noefi(char *arg)
67 {
68 	disable_runtime = true;
69 	return 0;
70 }
71 early_param("noefi", setup_noefi);
72 
73 bool efi_runtime_disabled(void)
74 {
75 	return disable_runtime;
76 }
77 
78 bool __pure __efi_soft_reserve_enabled(void)
79 {
80 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
81 }
82 
83 static int __init parse_efi_cmdline(char *str)
84 {
85 	if (!str) {
86 		pr_warn("need at least one option\n");
87 		return -EINVAL;
88 	}
89 
90 	if (parse_option_str(str, "debug"))
91 		set_bit(EFI_DBG, &efi.flags);
92 
93 	if (parse_option_str(str, "noruntime"))
94 		disable_runtime = true;
95 
96 	if (parse_option_str(str, "nosoftreserve"))
97 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
98 
99 	return 0;
100 }
101 early_param("efi", parse_efi_cmdline);
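/*
 * Example (illustrative): booting with "efi=debug,nosoftreserve" sets
 * EFI_DBG and EFI_MEM_NO_SOFT_RESERVE; options are comma separated and
 * unrecognised ones are silently ignored.
 */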
102 
103 struct kobject *efi_kobj;
104 
105 /*
106  * Let's not leave out systab information that snuck into
107  * the efivars driver.
108  * Note: do not add more fields to the systab sysfs file, as that breaks
109  * the sysfs one-value-per-file rule!
110  */
111 static ssize_t systab_show(struct kobject *kobj,
112 			   struct kobj_attribute *attr, char *buf)
113 {
114 	char *str = buf;
115 
116 	if (!kobj || !buf)
117 		return -EINVAL;
118 
119 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
120 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
121 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
122 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
123 	/*
124 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
125 	 * SMBIOS3 entry point shall be preferred, so we list it first to
126 	 * let applications stop parsing after the first match.
127 	 */
128 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
129 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
130 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
131 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
132 
133 	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
134 		str = efi_systab_show_arch(str);
135 
136 	return str - buf;
137 }
138 
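/*
 * Example (illustrative) contents of /sys/firmware/efi/systab, with
 * placeholder addresses:
 *   ACPI20=0x<addr>
 *   SMBIOS3=0x<addr>
 * Only the entry points present on the platform are listed, plus any
 * arch-specific ones appended by efi_systab_show_arch().
 */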
139 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
140 
141 static ssize_t fw_platform_size_show(struct kobject *kobj,
142 				     struct kobj_attribute *attr, char *buf)
143 {
144 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
145 }
146 
147 extern __weak struct kobj_attribute efi_attr_fw_vendor;
148 extern __weak struct kobj_attribute efi_attr_runtime;
149 extern __weak struct kobj_attribute efi_attr_config_table;
150 static struct kobj_attribute efi_attr_fw_platform_size =
151 	__ATTR_RO(fw_platform_size);
152 
153 static struct attribute *efi_subsys_attrs[] = {
154 	&efi_attr_systab.attr,
155 	&efi_attr_fw_platform_size.attr,
156 	&efi_attr_fw_vendor.attr,
157 	&efi_attr_runtime.attr,
158 	&efi_attr_config_table.attr,
159 	NULL,
160 };
161 
162 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
163 				   int n)
164 {
165 	return attr->mode;
166 }
167 
168 static const struct attribute_group efi_subsys_attr_group = {
169 	.attrs = efi_subsys_attrs,
170 	.is_visible = efi_attr_is_visible,
171 };
172 
173 static struct efivars generic_efivars;
174 static struct efivar_operations generic_ops;
175 
176 static int generic_ops_register(void)
177 {
178 	generic_ops.get_variable = efi.get_variable;
179 	generic_ops.set_variable = efi.set_variable;
180 	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
181 	generic_ops.get_next_variable = efi.get_next_variable;
182 	generic_ops.query_variable_store = efi_query_variable_store;
183 
184 	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
185 }
186 
187 static void generic_ops_unregister(void)
188 {
189 	efivars_unregister(&generic_efivars);
190 }
191 
192 #if IS_ENABLED(CONFIG_ACPI)
193 #define EFIVAR_SSDT_NAME_MAX	16
194 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
195 static int __init efivar_ssdt_setup(char *str)
196 {
197 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
198 
199 	if (ret)
200 		return ret;
201 
202 	if (strlen(str) < sizeof(efivar_ssdt))
203 		memcpy(efivar_ssdt, str, strlen(str));
204 	else
205 		pr_warn("efivar_ssdt: name too long: %s\n", str);
206 	return 0;
207 }
208 __setup("efivar_ssdt=", efivar_ssdt_setup);
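/*
 * Example (illustrative): booting with "efivar_ssdt=CPUSSDT" makes
 * efivar_ssdt_load() fetch every EFI variable named CPUSSDT (any vendor
 * GUID) and hand its contents to acpi_load_table(). "CPUSSDT" is just a
 * placeholder name.
 */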
209 
210 static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
211 				   unsigned long name_size, void *data)
212 {
213 	struct efivar_entry *entry;
214 	struct list_head *list = data;
215 	char utf8_name[EFIVAR_SSDT_NAME_MAX];
216 	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
217 
218 	ucs2_as_utf8(utf8_name, name, limit - 1);
219 	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
220 		return 0;
221 
222 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
223 	if (!entry)
224 		return 0;
225 
226 	memcpy(entry->var.VariableName, name, name_size);
227 	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
228 
229 	efivar_entry_add(entry, list);
230 
231 	return 0;
232 }
233 
234 static __init int efivar_ssdt_load(void)
235 {
236 	LIST_HEAD(entries);
237 	struct efivar_entry *entry, *aux;
238 	unsigned long size;
239 	void *data;
240 	int ret;
241 
242 	if (!efivar_ssdt[0])
243 		return 0;
244 
245 	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
246 
247 	list_for_each_entry_safe(entry, aux, &entries, list) {
248 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
249 			&entry->var.VendorGuid);
250 
251 		list_del(&entry->list);
252 
253 		ret = efivar_entry_size(entry, &size);
254 		if (ret) {
255 			pr_err("failed to get var size\n");
256 			goto free_entry;
257 		}
258 
259 		data = kmalloc(size, GFP_KERNEL);
260 		if (!data) {
261 			ret = -ENOMEM;
262 			goto free_entry;
263 		}
264 
265 		ret = efivar_entry_get(entry, NULL, &size, data);
266 		if (ret) {
267 			pr_err("failed to get var data\n");
268 			goto free_data;
269 		}
270 
271 		ret = acpi_load_table(data, NULL);
272 		if (ret) {
273 			pr_err("failed to load table: %d\n", ret);
274 			goto free_data;
275 		}
276 
277 		goto free_entry;
278 
279 free_data:
280 		kfree(data);
281 
282 free_entry:
283 		kfree(entry);
284 	}
285 
286 	return ret;
287 }
288 #else
289 static inline int efivar_ssdt_load(void) { return 0; }
290 #endif
291 
292 #ifdef CONFIG_DEBUG_FS
293 
294 #define EFI_DEBUGFS_MAX_BLOBS 32
295 
296 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
297 
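/*
 * Expose the preserved EFI boot services code/data regions as read-only
 * blobs under the "efi" debugfs directory, up to EFI_DEBUGFS_MAX_BLOBS of
 * them.
 */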
298 static void __init efi_debugfs_init(void)
299 {
300 	struct dentry *efi_debugfs;
301 	efi_memory_desc_t *md;
302 	char name[32];
303 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
304 	int i = 0;
305 
306 	efi_debugfs = debugfs_create_dir("efi", NULL);
307 	if (IS_ERR_OR_NULL(efi_debugfs))
308 		return;
309 
310 	for_each_efi_memory_desc(md) {
311 		switch (md->type) {
312 		case EFI_BOOT_SERVICES_CODE:
313 			snprintf(name, sizeof(name), "boot_services_code%d",
314 				 type_count[md->type]++);
315 			break;
316 		case EFI_BOOT_SERVICES_DATA:
317 			snprintf(name, sizeof(name), "boot_services_data%d",
318 				 type_count[md->type]++);
319 			break;
320 		default:
321 			continue;
322 		}
323 
324 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
325 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
326 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
327 			break;
328 		}
329 
330 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
331 		debugfs_blob[i].data = memremap(md->phys_addr,
332 						debugfs_blob[i].size,
333 						MEMREMAP_WB);
334 		if (!debugfs_blob[i].data)
335 			continue;
336 
337 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
338 		i++;
339 	}
340 }
341 #else
342 static inline void efi_debugfs_init(void) {}
343 #endif
344 
345 /*
346  * We register the efi subsystem with the firmware subsystem and the
347  * efivars subsystem with the efi subsystem, if the system was booted with
348  * EFI.
349  */
350 static int __init efisubsys_init(void)
351 {
352 	int error;
353 
354 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
355 		efi.runtime_supported_mask = 0;
356 
357 	if (!efi_enabled(EFI_BOOT))
358 		return 0;
359 
360 	if (efi.runtime_supported_mask) {
361 		/*
362 		 * Since we process only one efi_runtime_service() at a time, an
363 		 * ordered workqueue (which creates only one execution context)
364 		 * should suffice for all our needs.
365 		 */
366 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
367 		if (!efi_rts_wq) {
368 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
369 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
370 			efi.runtime_supported_mask = 0;
371 			return 0;
372 		}
373 	}
374 
375 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
376 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
377 
378 	/* We register the efi directory at /sys/firmware/efi */
379 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
380 	if (!efi_kobj) {
381 		pr_err("efi: Firmware registration failed.\n");
382 		return -ENOMEM;
383 	}
384 
385 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
386 		efivar_ssdt_load();
387 		error = generic_ops_register();
388 		if (error)
389 			goto err_put;
390 		platform_device_register_simple("efivars", 0, NULL, 0);
391 	}
392 
393 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
394 	if (error) {
395 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
396 		       error);
397 		goto err_unregister;
398 	}
399 
400 	error = efi_runtime_map_init(efi_kobj);
401 	if (error)
402 		goto err_remove_group;
403 
404 	/* and the standard mountpoint for efivarfs */
405 	error = sysfs_create_mount_point(efi_kobj, "efivars");
406 	if (error) {
407 		pr_err("efivars: Subsystem registration failed.\n");
408 		goto err_remove_group;
409 	}
410 
411 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
412 		efi_debugfs_init();
413 
414 	return 0;
415 
416 err_remove_group:
417 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
418 err_unregister:
419 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
420 		generic_ops_unregister();
421 err_put:
422 	kobject_put(efi_kobj);
423 	return error;
424 }
425 
426 subsys_initcall(efisubsys_init);
427 
428 /*
429  * Find the EFI memory descriptor for a given physical address: determine
430  * whether phys_addr falls within an EFI memory map entry and, if so, copy
431  * that entry into the supplied descriptor. Returns 0 on success, -ENOENT
432  * if no descriptor covers the address, -EINVAL on bad arguments.
433  */
434 int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
435 {
436 	efi_memory_desc_t *md;
437 
438 	if (!efi_enabled(EFI_MEMMAP)) {
439 		pr_err_once("EFI_MEMMAP is not enabled.\n");
440 		return -EINVAL;
441 	}
442 
443 	if (!out_md) {
444 		pr_err_once("out_md is null.\n");
445 		return -EINVAL;
446 	}
447 
448 	for_each_efi_memory_desc(md) {
449 		u64 size;
450 		u64 end;
451 
452 		size = md->num_pages << EFI_PAGE_SHIFT;
453 		end = md->phys_addr + size;
454 		if (phys_addr >= md->phys_addr && phys_addr < end) {
455 			memcpy(out_md, md, sizeof(*out_md));
456 			return 0;
457 		}
458 	}
459 	return -ENOENT;
460 }
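/*
 * Example (illustrative): the ESRT setup code uses efi_mem_desc_lookup() to
 * find the descriptor covering efi.esrt and checks its type and attributes
 * before mapping the table.
 */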
461 
462 /*
463  * Calculate the highest address of an efi memory descriptor.
464  */
465 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
466 {
467 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
468 	u64 end = md->phys_addr + size;
469 	return end;
470 }
471 
472 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
473 
474 /**
475  * efi_mem_reserve - Reserve an EFI memory region
476  * @addr: Physical address to reserve
477  * @size: Size of reservation
478  *
479  * Mark a region as reserved from general kernel allocation and
480  * prevent it being released by efi_free_boot_services().
481  *
482  * This function should be called by drivers once they've parsed EFI
483  * configuration tables to figure out where their data lives, e.g.
484  * efi_esrt_init().
485  */
486 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
487 {
488 	if (!memblock_is_region_reserved(addr, size))
489 		memblock_reserve(addr, size);
490 
491 	/*
492 	 * Some architectures (x86) reserve all boot services ranges
493 	 * until efi_free_boot_services() because of buggy firmware
494 	 * implementations. This means the above memblock_reserve() is
495 	 * superfluous on x86 and instead what it needs to do is
496 	 * ensure that the @addr, @size range is not freed.
497 	 */
498 	efi_arch_mem_reserve(addr, size);
499 }
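/*
 * Example (illustrative): efi_esrt_init() calls efi_mem_reserve() on the
 * pages backing the ESRT so that efi_free_boot_services() leaves them
 * intact.
 */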
500 
501 static const efi_config_table_type_t common_tables[] __initconst = {
502 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
503 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
504 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
505 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
506 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
507 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
508 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
509 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
510 	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
511 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
512 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
513 #ifdef CONFIG_EFI_RCI2_TABLE
514 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
515 #endif
516 	{},
517 };
518 
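/*
 * Match one configuration table GUID against a NULL_GUID-terminated array
 * of known table types; on a hit, store the table address through the
 * registered pointer, print its name (when one is registered) and return 1.
 * Returns 0 otherwise.
 */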
519 static __init int match_config_table(const efi_guid_t *guid,
520 				     unsigned long table,
521 				     const efi_config_table_type_t *table_types)
522 {
523 	int i;
524 
525 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
526 		if (!efi_guidcmp(*guid, table_types[i].guid)) {
527 			*(table_types[i].ptr) = table;
528 			if (table_types[i].name[0])
529 				pr_cont("%s=0x%lx ",
530 					table_types[i].name, table);
531 			return 1;
532 		}
533 	}
534 
535 	return 0;
536 }
537 
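/*
 * Walk the firmware-provided configuration table array, using the 32- or
 * 64-bit layout as appropriate on x86, and record the tables we recognise.
 * Also consumes the Linux-specific RNG seed, MEMRESERVE and RT properties
 * tables when present.
 */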
538 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
539 				   int count,
540 				   const efi_config_table_type_t *arch_tables)
541 {
542 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
543 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
544 	const efi_guid_t *guid;
545 	unsigned long table;
546 	int i;
547 
548 	pr_info("");
549 	for (i = 0; i < count; i++) {
550 		if (!IS_ENABLED(CONFIG_X86)) {
551 			guid = &config_tables[i].guid;
552 			table = (unsigned long)config_tables[i].table;
553 		} else if (efi_enabled(EFI_64BIT)) {
554 			guid = &tbl64[i].guid;
555 			table = tbl64[i].table;
556 
557 			if (IS_ENABLED(CONFIG_X86_32) &&
558 			    tbl64[i].table > U32_MAX) {
559 				pr_cont("\n");
560 				pr_err("Table located above 4GB, disabling EFI.\n");
561 				return -EINVAL;
562 			}
563 		} else {
564 			guid = &tbl32[i].guid;
565 			table = tbl32[i].table;
566 		}
567 
568 		if (!match_config_table(guid, table, common_tables) && arch_tables)
569 			match_config_table(guid, table, arch_tables);
570 	}
571 	pr_cont("\n");
572 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
573 
574 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
575 		struct linux_efi_random_seed *seed;
576 		u32 size = 0;
577 
578 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
579 		if (seed != NULL) {
580 			size = READ_ONCE(seed->size);
581 			early_memunmap(seed, sizeof(*seed));
582 		} else {
583 			pr_err("Could not map UEFI random seed!\n");
584 		}
585 		if (size > 0) {
586 			seed = early_memremap(efi_rng_seed,
587 					      sizeof(*seed) + size);
588 			if (seed != NULL) {
589 				pr_notice("seeding entropy pool\n");
590 				add_bootloader_randomness(seed->bits, size);
591 				early_memunmap(seed, sizeof(*seed) + size);
592 			} else {
593 				pr_err("Could not map UEFI random seed!\n");
594 			}
595 		}
596 	}
597 
598 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
599 		efi_memattr_init();
600 
601 	efi_tpm_eventlog_init();
602 
603 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
604 		unsigned long prsv = mem_reserve;
605 
606 		while (prsv) {
607 			struct linux_efi_memreserve *rsv;
608 			u8 *p;
609 
610 			/*
611 			 * Just map a full page: that is what we will get
612 			 * anyway, and it permits us to map the entire entry
613 			 * before knowing its size.
614 			 */
615 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
616 					   PAGE_SIZE);
617 			if (p == NULL) {
618 				pr_err("Could not map UEFI memreserve entry!\n");
619 				return -ENOMEM;
620 			}
621 
622 			rsv = (void *)(p + prsv % PAGE_SIZE);
623 
624 			/* reserve the entry itself */
625 			memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
626 
627 			for (i = 0; i < atomic_read(&rsv->count); i++) {
628 				memblock_reserve(rsv->entry[i].base,
629 						 rsv->entry[i].size);
630 			}
631 
632 			prsv = rsv->next;
633 			early_memunmap(p, PAGE_SIZE);
634 		}
635 	}
636 
637 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
638 		efi_rt_properties_table_t *tbl;
639 
640 		tbl = early_memremap(rt_prop, sizeof(*tbl));
641 		if (tbl) {
642 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
643 			early_memunmap(tbl, sizeof(*tbl));
644 		}
645 	}
646 
647 	return 0;
648 }
649 
650 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
651 				   int min_major_version)
652 {
653 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
654 		pr_err("System table signature incorrect!\n");
655 		return -EINVAL;
656 	}
657 
658 	if ((systab_hdr->revision >> 16) < min_major_version)
659 		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
660 		       systab_hdr->revision >> 16,
661 		       systab_hdr->revision & 0xffff,
662 		       min_major_version);
663 
664 	return 0;
665 }
666 
667 #ifndef CONFIG_IA64
668 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
669 						size_t size)
670 {
671 	const efi_char16_t *ret;
672 
673 	ret = early_memremap_ro(fw_vendor, size);
674 	if (!ret)
675 		pr_err("Could not map the firmware vendor!\n");
676 	return ret;
677 }
678 
679 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
680 {
681 	early_memunmap((void *)fw_vendor, size);
682 }
683 #else
684 #define map_fw_vendor(p, s)	__va(p)
685 #define unmap_fw_vendor(v, s)
686 #endif
687 
688 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
689 				     unsigned long fw_vendor)
690 {
691 	char vendor[100] = "unknown";
692 	const efi_char16_t *c16;
693 	size_t i;
694 
695 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
696 	if (c16) {
697 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
698 			vendor[i] = c16[i];
699 		vendor[i] = '\0';
700 
701 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
702 	}
703 
704 	pr_info("EFI v%u.%.02u by %s\n",
705 		systab_hdr->revision >> 16,
706 		systab_hdr->revision & 0xffff,
707 		vendor);
708 }
709 
710 static __initdata char memory_type_name[][20] = {
711 	"Reserved",
712 	"Loader Code",
713 	"Loader Data",
714 	"Boot Code",
715 	"Boot Data",
716 	"Runtime Code",
717 	"Runtime Data",
718 	"Conventional Memory",
719 	"Unusable Memory",
720 	"ACPI Reclaim Memory",
721 	"ACPI Memory NVS",
722 	"Memory Mapped I/O",
723 	"MMIO Port Space",
724 	"PAL Code",
725 	"Persistent Memory",
726 };
727 
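/*
 * Format an EFI memory descriptor's type and attribute bits into buf, e.g.
 * "[Conventional Memory|...|WB|WT|WC|UC]"; unknown attribute bits fall back
 * to a raw "|attr=0x..." dump.
 */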
728 char * __init efi_md_typeattr_format(char *buf, size_t size,
729 				     const efi_memory_desc_t *md)
730 {
731 	char *pos;
732 	int type_len;
733 	u64 attr;
734 
735 	pos = buf;
736 	if (md->type >= ARRAY_SIZE(memory_type_name))
737 		type_len = snprintf(pos, size, "[type=%u", md->type);
738 	else
739 		type_len = snprintf(pos, size, "[%-*s",
740 				    (int)(sizeof(memory_type_name[0]) - 1),
741 				    memory_type_name[md->type]);
742 	if (type_len >= size)
743 		return buf;
744 
745 	pos += type_len;
746 	size -= type_len;
747 
748 	attr = md->attribute;
749 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
750 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
751 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
752 		     EFI_MEMORY_NV | EFI_MEMORY_SP |
753 		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
754 		snprintf(pos, size, "|attr=0x%016llx]",
755 			 (unsigned long long)attr);
756 	else
757 		snprintf(pos, size,
758 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
759 			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
760 			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
761 			 attr & EFI_MEMORY_SP      ? "SP"  : "",
762 			 attr & EFI_MEMORY_NV      ? "NV"  : "",
763 			 attr & EFI_MEMORY_XP      ? "XP"  : "",
764 			 attr & EFI_MEMORY_RP      ? "RP"  : "",
765 			 attr & EFI_MEMORY_WP      ? "WP"  : "",
766 			 attr & EFI_MEMORY_RO      ? "RO"  : "",
767 			 attr & EFI_MEMORY_UCE     ? "UCE" : "",
768 			 attr & EFI_MEMORY_WB      ? "WB"  : "",
769 			 attr & EFI_MEMORY_WT      ? "WT"  : "",
770 			 attr & EFI_MEMORY_WC      ? "WC"  : "",
771 			 attr & EFI_MEMORY_UC      ? "UC"  : "");
772 	return buf;
773 }
774 
775 /*
776  * IA64 has a funky EFI memory map that doesn't work the same way as
777  * other architectures.
778  */
779 #ifndef CONFIG_IA64
780 /*
781  * efi_mem_attributes - lookup memmap attributes for physical address
782  * @phys_addr: the physical address to lookup
783  *
784  * Search in the EFI memory map for the region covering
785  * @phys_addr. Returns the EFI memory attributes if the region
786  * was found in the memory map, 0 otherwise.
787  */
788 u64 efi_mem_attributes(unsigned long phys_addr)
789 {
790 	efi_memory_desc_t *md;
791 
792 	if (!efi_enabled(EFI_MEMMAP))
793 		return 0;
794 
795 	for_each_efi_memory_desc(md) {
796 		if ((md->phys_addr <= phys_addr) &&
797 		    (phys_addr < (md->phys_addr +
798 		    (md->num_pages << EFI_PAGE_SHIFT))))
799 			return md->attribute;
800 	}
801 	return 0;
802 }
803 
804 /*
805  * efi_mem_type - lookup memmap type for physical address
806  * @phys_addr: the physical address to lookup
807  *
808  * Search in the EFI memory map for the region covering @phys_addr.
809  * Returns the EFI memory type if the region was found in the memory
810  * map, -EINVAL otherwise.
811  */
812 int efi_mem_type(unsigned long phys_addr)
813 {
814 	const efi_memory_desc_t *md;
815 
816 	if (!efi_enabled(EFI_MEMMAP))
817 		return -ENOTSUPP;
818 
819 	for_each_efi_memory_desc(md) {
820 		if ((md->phys_addr <= phys_addr) &&
821 		    (phys_addr < (md->phys_addr +
822 				  (md->num_pages << EFI_PAGE_SHIFT))))
823 			return md->type;
824 	}
825 	return -EINVAL;
826 }
827 #endif
828 
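/* Translate an EFI_* status code into a negative errno (0 for EFI_SUCCESS). */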
829 int efi_status_to_err(efi_status_t status)
830 {
831 	int err;
832 
833 	switch (status) {
834 	case EFI_SUCCESS:
835 		err = 0;
836 		break;
837 	case EFI_INVALID_PARAMETER:
838 		err = -EINVAL;
839 		break;
840 	case EFI_OUT_OF_RESOURCES:
841 		err = -ENOSPC;
842 		break;
843 	case EFI_DEVICE_ERROR:
844 		err = -EIO;
845 		break;
846 	case EFI_WRITE_PROTECTED:
847 		err = -EROFS;
848 		break;
849 	case EFI_SECURITY_VIOLATION:
850 		err = -EACCES;
851 		break;
852 	case EFI_NOT_FOUND:
853 		err = -ENOENT;
854 		break;
855 	case EFI_ABORTED:
856 		err = -EINTR;
857 		break;
858 	default:
859 		err = -EINVAL;
860 	}
861 
862 	return err;
863 }
864 
865 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
866 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
867 
868 static int __init efi_memreserve_map_root(void)
869 {
870 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
871 		return -ENODEV;
872 
873 	efi_memreserve_root = memremap(mem_reserve,
874 				       sizeof(*efi_memreserve_root),
875 				       MEMREMAP_WB);
876 	if (WARN_ON_ONCE(!efi_memreserve_root))
877 		return -ENOMEM;
878 	return 0;
879 }
880 
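/*
 * Insert a child "reserved" resource under the conflicting 'System RAM'
 * region so the reservation is visible in /proc/iomem.
 */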
881 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
882 {
883 	struct resource *res, *parent;
884 
885 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
886 	if (!res)
887 		return -ENOMEM;
888 
889 	res->name	= "reserved";
890 	res->flags	= IORESOURCE_MEM;
891 	res->start	= addr;
892 	res->end	= addr + size - 1;
893 
894 	/* we expect a conflict with a 'System RAM' region */
895 	parent = request_resource_conflict(&iomem_resource, res);
896 	return parent ? request_resource(parent, res) : 0;
897 }
898 
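/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE_TABLE linked list so it
 * survives kexec; a new page-sized list entry is allocated when the
 * existing ones are full.
 */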
899 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
900 {
901 	struct linux_efi_memreserve *rsv;
902 	unsigned long prsv;
903 	int rc, index;
904 
905 	if (efi_memreserve_root == (void *)ULONG_MAX)
906 		return -ENODEV;
907 
908 	if (!efi_memreserve_root) {
909 		rc = efi_memreserve_map_root();
910 		if (rc)
911 			return rc;
912 	}
913 
914 	/* first try to find a slot in an existing linked list entry */
915 	for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
916 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
917 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
918 		if (index < rsv->size) {
919 			rsv->entry[index].base = addr;
920 			rsv->entry[index].size = size;
921 
922 			memunmap(rsv);
923 			return efi_mem_reserve_iomem(addr, size);
924 		}
925 		memunmap(rsv);
926 	}
927 
928 	/* no slot found - allocate a new linked list entry */
929 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
930 	if (!rsv)
931 		return -ENOMEM;
932 
933 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
934 	if (rc) {
935 		free_page((unsigned long)rsv);
936 		return rc;
937 	}
938 
939 	/*
940 	 * The memremap() call above assumes that a linux_efi_memreserve entry
941 	 * never crosses a page boundary, so let's ensure that this remains true
942 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
943 	 * using SZ_4K explicitly in the size calculation below.
944 	 */
945 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
946 	atomic_set(&rsv->count, 1);
947 	rsv->entry[0].base = addr;
948 	rsv->entry[0].size = size;
949 
950 	spin_lock(&efi_mem_reserve_persistent_lock);
951 	rsv->next = efi_memreserve_root->next;
952 	efi_memreserve_root->next = __pa(rsv);
953 	spin_unlock(&efi_mem_reserve_persistent_lock);
954 
955 	return efi_mem_reserve_iomem(addr, size);
956 }
957 
958 static int __init efi_memreserve_root_init(void)
959 {
960 	if (efi_memreserve_root)
961 		return 0;
962 	if (efi_memreserve_map_root())
963 		efi_memreserve_root = (void *)ULONG_MAX;
964 	return 0;
965 }
966 early_initcall(efi_memreserve_root_init);
967 
968 #ifdef CONFIG_KEXEC
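/*
 * Refresh the EFI random seed table with fresh entropy just before kexec so
 * the next kernel does not reuse the seed already consumed by this one.
 */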
969 static int update_efi_random_seed(struct notifier_block *nb,
970 				  unsigned long code, void *unused)
971 {
972 	struct linux_efi_random_seed *seed;
973 	u32 size = 0;
974 
975 	if (!kexec_in_progress)
976 		return NOTIFY_DONE;
977 
978 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
979 	if (seed != NULL) {
980 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
981 		memunmap(seed);
982 	} else {
983 		pr_err("Could not map UEFI random seed!\n");
984 	}
985 	if (size > 0) {
986 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
987 				MEMREMAP_WB);
988 		if (seed != NULL) {
989 			seed->size = size;
990 			get_random_bytes(seed->bits, seed->size);
991 			memunmap(seed);
992 		} else {
993 			pr_err("Could not map UEFI random seed!\n");
994 		}
995 	}
996 	return NOTIFY_DONE;
997 }
998 
999 static struct notifier_block efi_random_seed_nb = {
1000 	.notifier_call = update_efi_random_seed,
1001 };
1002 
1003 static int __init register_update_efi_random_seed(void)
1004 {
1005 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1006 		return 0;
1007 	return register_reboot_notifier(&efi_random_seed_nb);
1008 }
1009 late_initcall(register_update_efi_random_seed);
1010 #endif
1011