xref: /openbmc/linux/drivers/firmware/efi/efi.c (revision c4a7b9b5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/initrd.h>
25 #include <linux/io.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 
36 #include <asm/early_ioremap.h>
37 
38 struct efi __read_mostly efi = {
39 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
40 	.acpi			= EFI_INVALID_TABLE_ADDR,
41 	.acpi20			= EFI_INVALID_TABLE_ADDR,
42 	.smbios			= EFI_INVALID_TABLE_ADDR,
43 	.smbios3		= EFI_INVALID_TABLE_ADDR,
44 	.esrt			= EFI_INVALID_TABLE_ADDR,
45 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
46 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
47 #ifdef CONFIG_LOAD_UEFI_KEYS
48 	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
49 #endif
50 #ifdef CONFIG_EFI_COCO_SECRET
51 	.coco_secret		= EFI_INVALID_TABLE_ADDR,
52 #endif
53 };
54 EXPORT_SYMBOL(efi);
55 
56 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
57 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
58 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
59 static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
60 
61 struct mm_struct efi_mm = {
62 	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
63 	.mm_users		= ATOMIC_INIT(2),
64 	.mm_count		= ATOMIC_INIT(1),
65 	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
66 	MMAP_LOCK_INITIALIZER(efi_mm)
67 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
68 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
69 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
70 };
71 
72 struct workqueue_struct *efi_rts_wq;
73 
74 static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
75 static int __init setup_noefi(char *arg)
76 {
77 	disable_runtime = true;
78 	return 0;
79 }
80 early_param("noefi", setup_noefi);
81 
82 bool efi_runtime_disabled(void)
83 {
84 	return disable_runtime;
85 }
86 
87 bool __pure __efi_soft_reserve_enabled(void)
88 {
89 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
90 }
91 
92 static int __init parse_efi_cmdline(char *str)
93 {
94 	if (!str) {
95 		pr_warn("need at least one option\n");
96 		return -EINVAL;
97 	}
98 
99 	if (parse_option_str(str, "debug"))
100 		set_bit(EFI_DBG, &efi.flags);
101 
102 	if (parse_option_str(str, "noruntime"))
103 		disable_runtime = true;
104 
105 	if (parse_option_str(str, "runtime"))
106 		disable_runtime = false;
107 
108 	if (parse_option_str(str, "nosoftreserve"))
109 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
110 
111 	return 0;
112 }
113 early_param("efi", parse_efi_cmdline);
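
/*
 * Illustrative command line usage, based only on the options parsed above
 * (values and combinations are examples, not an exhaustive list):
 *
 *	efi=debug,nosoftreserve		set EFI_DBG and ignore soft-reserved memory
 *	efi=noruntime			disable EFI runtime services
 *	noefi				also disables EFI runtime services
 */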
114 
115 struct kobject *efi_kobj;
116 
117 /*
118  * Keep exposing the systab information that historically snuck into
119  * the efivars driver.
120  * Note: do not add more fields to the systab sysfs file, as that breaks
121  * the sysfs one-value-per-file rule!
122  */
123 static ssize_t systab_show(struct kobject *kobj,
124 			   struct kobj_attribute *attr, char *buf)
125 {
126 	char *str = buf;
127 
128 	if (!kobj || !buf)
129 		return -EINVAL;
130 
131 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
132 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
133 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
134 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
135 	/*
136 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
137 	 * SMBIOS3 entry point shall be preferred, so we list it first to
138 	 * let applications stop parsing after the first match.
139 	 */
140 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
141 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
142 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
143 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
144 
145 	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
146 		str = efi_systab_show_arch(str);
147 
148 	return str - buf;
149 }
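
/*
 * Illustrative contents of /sys/firmware/efi/systab as produced by
 * systab_show() above (addresses are made up; only tables present on the
 * platform are listed):
 *
 *	ACPI20=0x7ff7e014
 *	SMBIOS3=0x7ff7c000
 *	SMBIOS=0x7ff7c020
 */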
150 
151 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
152 
153 static ssize_t fw_platform_size_show(struct kobject *kobj,
154 				     struct kobj_attribute *attr, char *buf)
155 {
156 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
157 }
158 
159 extern __weak struct kobj_attribute efi_attr_fw_vendor;
160 extern __weak struct kobj_attribute efi_attr_runtime;
161 extern __weak struct kobj_attribute efi_attr_config_table;
162 static struct kobj_attribute efi_attr_fw_platform_size =
163 	__ATTR_RO(fw_platform_size);
164 
165 static struct attribute *efi_subsys_attrs[] = {
166 	&efi_attr_systab.attr,
167 	&efi_attr_fw_platform_size.attr,
168 	&efi_attr_fw_vendor.attr,
169 	&efi_attr_runtime.attr,
170 	&efi_attr_config_table.attr,
171 	NULL,
172 };
173 
174 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
175 				   int n)
176 {
177 	return attr->mode;
178 }
179 
180 static const struct attribute_group efi_subsys_attr_group = {
181 	.attrs = efi_subsys_attrs,
182 	.is_visible = efi_attr_is_visible,
183 };
184 
185 static struct efivars generic_efivars;
186 static struct efivar_operations generic_ops;
187 
188 static int generic_ops_register(void)
189 {
190 	generic_ops.get_variable = efi.get_variable;
191 	generic_ops.get_next_variable = efi.get_next_variable;
192 	generic_ops.query_variable_store = efi_query_variable_store;
193 
194 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
195 		generic_ops.set_variable = efi.set_variable;
196 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
197 	}
198 	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
199 }
200 
201 static void generic_ops_unregister(void)
202 {
203 	efivars_unregister(&generic_efivars);
204 }
205 
206 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
207 #define EFIVAR_SSDT_NAME_MAX	16UL
208 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
209 static int __init efivar_ssdt_setup(char *str)
210 {
211 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
212 
213 	if (ret)
214 		return ret;
215 
216 	if (strlen(str) < sizeof(efivar_ssdt))
217 		memcpy(efivar_ssdt, str, strlen(str));
218 	else
219 		pr_warn("efivar_ssdt: name too long: %s\n", str);
220 	return 1;
221 }
222 __setup("efivar_ssdt=", efivar_ssdt_setup);
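
/*
 * Illustrative usage (the variable name is hypothetical): booting with
 * "efivar_ssdt=MYOVERLAY" makes efivar_ssdt_load() below search the EFI
 * variables for entries named MYOVERLAY and hand their contents to
 * acpi_load_table().
 */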
223 
224 static __init int efivar_ssdt_load(void)
225 {
226 	unsigned long name_size = 256;
227 	efi_char16_t *name = NULL;
228 	efi_status_t status;
229 	efi_guid_t guid;
230 
231 	if (!efivar_ssdt[0])
232 		return 0;
233 
234 	name = kzalloc(name_size, GFP_KERNEL);
235 	if (!name)
236 		return -ENOMEM;
237 
238 	for (;;) {
239 		char utf8_name[EFIVAR_SSDT_NAME_MAX];
240 		unsigned long data_size = 0;
241 		void *data;
242 		int limit;
243 
244 		status = efi.get_next_variable(&name_size, name, &guid);
245 		if (status == EFI_NOT_FOUND) {
246 			break;
247 		} else if (status == EFI_BUFFER_TOO_SMALL) {
248 			name = krealloc(name, name_size, GFP_KERNEL);
249 			if (!name)
250 				return -ENOMEM;
251 			continue;
252 		}
253 
254 		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
255 		ucs2_as_utf8(utf8_name, name, limit - 1);
256 		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
257 			continue;
258 
259 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
260 
261 		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
262 		if (status != EFI_BUFFER_TOO_SMALL || !data_size)
263 			return -EIO;
264 
265 		data = kmalloc(data_size, GFP_KERNEL);
266 		if (!data)
267 			return -ENOMEM;
268 
269 		status = efi.get_variable(name, &guid, NULL, &data_size, data);
270 		if (status == EFI_SUCCESS) {
271 			acpi_status ret = acpi_load_table(data, NULL);
272 			if (ret)
273 				pr_err("failed to load table: %u\n", ret);
274 		} else {
275 			pr_err("failed to get var data: 0x%lx\n", status);
276 		}
277 		kfree(data);
278 	}
279 	return 0;
280 }
281 #else
282 static inline int efivar_ssdt_load(void) { return 0; }
283 #endif
284 
285 #ifdef CONFIG_DEBUG_FS
286 
287 #define EFI_DEBUGFS_MAX_BLOBS 32
288 
289 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
290 
291 static void __init efi_debugfs_init(void)
292 {
293 	struct dentry *efi_debugfs;
294 	efi_memory_desc_t *md;
295 	char name[32];
296 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
297 	int i = 0;
298 
299 	efi_debugfs = debugfs_create_dir("efi", NULL);
300 	if (IS_ERR_OR_NULL(efi_debugfs))
301 		return;
302 
303 	for_each_efi_memory_desc(md) {
304 		switch (md->type) {
305 		case EFI_BOOT_SERVICES_CODE:
306 			snprintf(name, sizeof(name), "boot_services_code%d",
307 				 type_count[md->type]++);
308 			break;
309 		case EFI_BOOT_SERVICES_DATA:
310 			snprintf(name, sizeof(name), "boot_services_data%d",
311 				 type_count[md->type]++);
312 			break;
313 		default:
314 			continue;
315 		}
316 
317 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
318 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
319 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
320 			break;
321 		}
322 
323 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
324 		debugfs_blob[i].data = memremap(md->phys_addr,
325 						debugfs_blob[i].size,
326 						MEMREMAP_WB);
327 		if (!debugfs_blob[i].data)
328 			continue;
329 
330 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
331 		i++;
332 	}
333 }
334 #else
335 static inline void efi_debugfs_init(void) {}
336 #endif
337 
338 /*
339  * We register the efi subsystem with the firmware subsystem and the
340  * efivars subsystem with the efi subsystem, if the system was booted with
341  * EFI.
342  */
343 static int __init efisubsys_init(void)
344 {
345 	int error;
346 
347 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
348 		efi.runtime_supported_mask = 0;
349 
350 	if (!efi_enabled(EFI_BOOT))
351 		return 0;
352 
353 	if (efi.runtime_supported_mask) {
354 		/*
355 		 * Since we process only one efi_runtime_service() at a time, an
356 		 * ordered workqueue (which creates only one execution context)
357 		 * should suffice for all our needs.
358 		 */
359 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
360 		if (!efi_rts_wq) {
361 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
362 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
363 			efi.runtime_supported_mask = 0;
364 			return 0;
365 		}
366 	}
367 
368 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
369 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
370 
371 	/* We register the efi directory at /sys/firmware/efi */
372 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
373 	if (!efi_kobj) {
374 		pr_err("efi: Firmware registration failed.\n");
375 		destroy_workqueue(efi_rts_wq);
376 		return -ENOMEM;
377 	}
378 
379 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
380 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
381 		error = generic_ops_register();
382 		if (error)
383 			goto err_put;
384 		efivar_ssdt_load();
385 		platform_device_register_simple("efivars", 0, NULL, 0);
386 	}
387 
388 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
389 	if (error) {
390 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
391 		       error);
392 		goto err_unregister;
393 	}
394 
395 	error = efi_runtime_map_init(efi_kobj);
396 	if (error)
397 		goto err_remove_group;
398 
399 	/* and the standard mountpoint for efivarfs */
400 	error = sysfs_create_mount_point(efi_kobj, "efivars");
401 	if (error) {
402 		pr_err("efivars: Subsystem registration failed.\n");
403 		goto err_remove_group;
404 	}
405 
406 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
407 		efi_debugfs_init();
408 
409 #ifdef CONFIG_EFI_COCO_SECRET
410 	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
411 		platform_device_register_simple("efi_secret", 0, NULL, 0);
412 #endif
413 
414 	return 0;
415 
416 err_remove_group:
417 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
418 err_unregister:
419 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
420 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
421 		generic_ops_unregister();
422 err_put:
423 	kobject_put(efi_kobj);
424 	destroy_workqueue(efi_rts_wq);
425 	return error;
426 }
427 
428 subsys_initcall(efisubsys_init);
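
/*
 * On an EFI-booted system, efisubsys_init() typically ends up creating a
 * sysfs layout along these lines (illustrative; which entries appear depends
 * on runtime service support and the is_visible callback):
 *
 *	/sys/firmware/efi/systab
 *	/sys/firmware/efi/fw_platform_size
 *	/sys/firmware/efi/fw_vendor
 *	/sys/firmware/efi/runtime
 *	/sys/firmware/efi/config_table
 *	/sys/firmware/efi/efivars/	(efivarfs mount point)
 *	/sys/firmware/efi/runtime-map/	(via efi_runtime_map_init())
 */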
429 
430 void __init efi_find_mirror(void)
431 {
432 	efi_memory_desc_t *md;
433 	u64 mirror_size = 0, total_size = 0;
434 
435 	if (!efi_enabled(EFI_MEMMAP))
436 		return;
437 
438 	for_each_efi_memory_desc(md) {
439 		unsigned long long start = md->phys_addr;
440 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
441 
442 		total_size += size;
443 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
444 			memblock_mark_mirror(start, size);
445 			mirror_size += size;
446 		}
447 	}
448 	if (mirror_size)
449 		pr_info("Memory: %lldM/%lldM mirrored memory\n",
450 			mirror_size>>20, total_size>>20);
451 }
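
/*
 * Example of the resulting log line on a platform that exposes partially
 * mirrored memory (sizes are illustrative):
 *
 *	efi: Memory: 2048M/16384M mirrored memory
 */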
452 
453 /*
454  * Find the EFI memory descriptor for a given physical address: determine
455  * whether the address falls within an EFI memory map entry,
456  * and if so, populate the supplied memory descriptor with the appropriate
457  * data.
458  */
459 int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
460 {
461 	efi_memory_desc_t *md;
462 
463 	if (!efi_enabled(EFI_MEMMAP)) {
464 		pr_err_once("EFI_MEMMAP is not enabled.\n");
465 		return -EINVAL;
466 	}
467 
468 	if (!out_md) {
469 		pr_err_once("out_md is null.\n");
470 		return -EINVAL;
471 	}
472 
473 	for_each_efi_memory_desc(md) {
474 		u64 size;
475 		u64 end;
476 
477 		size = md->num_pages << EFI_PAGE_SHIFT;
478 		end = md->phys_addr + size;
479 		if (phys_addr >= md->phys_addr && phys_addr < end) {
480 			memcpy(out_md, md, sizeof(*out_md));
481 			return 0;
482 		}
483 	}
484 	return -ENOENT;
485 }
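
/*
 * Minimal usage sketch (hypothetical caller): find the descriptor covering a
 * physical address and derive the bounds of that region from it, using
 * efi_mem_desc_end() defined just below.
 *
 *	efi_memory_desc_t md;
 *
 *	if (!efi_mem_desc_lookup(phys_addr, &md)) {
 *		u64 start = md.phys_addr;
 *		u64 end = efi_mem_desc_end(&md);
 *		...
 *	}
 */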
486 
487 /*
488  * Calculate the highest address of an efi memory descriptor.
489  */
490 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
491 {
492 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
493 	u64 end = md->phys_addr + size;
494 	return end;
495 }
496 
497 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
498 
499 /**
500  * efi_mem_reserve - Reserve an EFI memory region
501  * @addr: Physical address to reserve
502  * @size: Size of reservation
503  *
504  * Mark a region as reserved from general kernel allocation and
505  * prevent it being released by efi_free_boot_services().
506  *
507  * This function should be called by drivers once they've parsed EFI
508  * configuration tables to figure out where their data lives, e.g.
509  * efi_esrt_init().
510  */
511 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
512 {
513 	if (!memblock_is_region_reserved(addr, size))
514 		memblock_reserve(addr, size);
515 
516 	/*
517 	 * Some architectures (x86) reserve all boot services ranges
518 	 * until efi_free_boot_services() because of buggy firmware
519 	 * implementations. This means the above memblock_reserve() is
520  * superfluous on x86, and what it needs to do instead is
521  * ensure that the region described by @addr and @size is not freed.
522 	 */
523 	efi_arch_mem_reserve(addr, size);
524 }
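
/*
 * Hedged usage sketch: a driver that has located a firmware-provided table
 * at table_phys (a hypothetical address) spanning table_size bytes would
 * keep it from being reclaimed with:
 *
 *	efi_mem_reserve(table_phys, table_size);
 */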
525 
526 static const efi_config_table_type_t common_tables[] __initconst = {
527 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
528 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
529 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
530 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
531 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
532 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
533 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
534 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
535 	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
536 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
537 	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
538 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
539 #ifdef CONFIG_EFI_RCI2_TABLE
540 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
541 #endif
542 #ifdef CONFIG_LOAD_UEFI_KEYS
543 	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
544 #endif
545 #ifdef CONFIG_EFI_COCO_SECRET
546 	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
547 #endif
548 	{},
549 };
550 
551 static __init int match_config_table(const efi_guid_t *guid,
552 				     unsigned long table,
553 				     const efi_config_table_type_t *table_types)
554 {
555 	int i;
556 
557 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
558 		if (!efi_guidcmp(*guid, table_types[i].guid)) {
559 			*(table_types[i].ptr) = table;
560 			if (table_types[i].name[0])
561 				pr_cont("%s=0x%lx ",
562 					table_types[i].name, table);
563 			return 1;
564 		}
565 	}
566 
567 	return 0;
568 }
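
/*
 * Combined with the pr_info("")/pr_cont("\n") bracketing in
 * efi_config_parse_tables() below, the matches printed above show up as a
 * single boot log line, e.g. (addresses illustrative):
 *
 *	efi: ACPI 2.0=0x7ff7e014 ACPI=0x7ff7e000 SMBIOS 3.0=0x7ff7c000 ESRT=0x7ff6a000 RNG=0x7ffdb018
 */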
569 
570 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
571 				   int count,
572 				   const efi_config_table_type_t *arch_tables)
573 {
574 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
575 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
576 	const efi_guid_t *guid;
577 	unsigned long table;
578 	int i;
579 
580 	pr_info("");
581 	for (i = 0; i < count; i++) {
582 		if (!IS_ENABLED(CONFIG_X86)) {
583 			guid = &config_tables[i].guid;
584 			table = (unsigned long)config_tables[i].table;
585 		} else if (efi_enabled(EFI_64BIT)) {
586 			guid = &tbl64[i].guid;
587 			table = tbl64[i].table;
588 
589 			if (IS_ENABLED(CONFIG_X86_32) &&
590 			    tbl64[i].table > U32_MAX) {
591 				pr_cont("\n");
592 				pr_err("Table located above 4GB, disabling EFI.\n");
593 				return -EINVAL;
594 			}
595 		} else {
596 			guid = &tbl32[i].guid;
597 			table = tbl32[i].table;
598 		}
599 
600 		if (!match_config_table(guid, table, common_tables) && arch_tables)
601 			match_config_table(guid, table, arch_tables);
602 	}
603 	pr_cont("\n");
604 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
605 
606 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
607 		struct linux_efi_random_seed *seed;
608 		u32 size = 0;
609 
610 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
611 		if (seed != NULL) {
612 			size = READ_ONCE(seed->size);
613 			early_memunmap(seed, sizeof(*seed));
614 		} else {
615 			pr_err("Could not map UEFI random seed!\n");
616 		}
617 		if (size > 0) {
618 			seed = early_memremap(efi_rng_seed,
619 					      sizeof(*seed) + size);
620 			if (seed != NULL) {
621 				pr_notice("seeding entropy pool\n");
622 				add_bootloader_randomness(seed->bits, size);
623 				early_memunmap(seed, sizeof(*seed) + size);
624 			} else {
625 				pr_err("Could not map UEFI random seed!\n");
626 			}
627 		}
628 	}
629 
630 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
631 		efi_memattr_init();
632 
633 	efi_tpm_eventlog_init();
634 
635 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
636 		unsigned long prsv = mem_reserve;
637 
638 		while (prsv) {
639 			struct linux_efi_memreserve *rsv;
640 			u8 *p;
641 
642 			/*
643 			 * Just map a full page: that is what we will get
644 			 * anyway, and it permits us to map the entire entry
645 			 * before knowing its size.
646 			 */
647 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
648 					   PAGE_SIZE);
649 			if (p == NULL) {
650 				pr_err("Could not map UEFI memreserve entry!\n");
651 				return -ENOMEM;
652 			}
653 
654 			rsv = (void *)(p + prsv % PAGE_SIZE);
655 
656 			/* reserve the entry itself */
657 			memblock_reserve(prsv,
658 					 struct_size(rsv, entry, rsv->size));
659 
660 			for (i = 0; i < atomic_read(&rsv->count); i++) {
661 				memblock_reserve(rsv->entry[i].base,
662 						 rsv->entry[i].size);
663 			}
664 
665 			prsv = rsv->next;
666 			early_memunmap(p, PAGE_SIZE);
667 		}
668 	}
669 
670 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
671 		efi_rt_properties_table_t *tbl;
672 
673 		tbl = early_memremap(rt_prop, sizeof(*tbl));
674 		if (tbl) {
675 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
676 			early_memunmap(tbl, sizeof(*tbl));
677 		}
678 	}
679 
680 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
681 	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
682 		struct linux_efi_initrd *tbl;
683 
684 		tbl = early_memremap(initrd, sizeof(*tbl));
685 		if (tbl) {
686 			phys_initrd_start = tbl->base;
687 			phys_initrd_size = tbl->size;
688 			early_memunmap(tbl, sizeof(*tbl));
689 		}
690 	}
691 
692 	return 0;
693 }
694 
695 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
696 				   int min_major_version)
697 {
698 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
699 		pr_err("System table signature incorrect!\n");
700 		return -EINVAL;
701 	}
702 
703 	if ((systab_hdr->revision >> 16) < min_major_version)
704 		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
705 		       systab_hdr->revision >> 16,
706 		       systab_hdr->revision & 0xffff,
707 		       min_major_version);
708 
709 	return 0;
710 }
711 
712 #ifndef CONFIG_IA64
713 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
714 						size_t size)
715 {
716 	const efi_char16_t *ret;
717 
718 	ret = early_memremap_ro(fw_vendor, size);
719 	if (!ret)
720 		pr_err("Could not map the firmware vendor!\n");
721 	return ret;
722 }
723 
724 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
725 {
726 	early_memunmap((void *)fw_vendor, size);
727 }
728 #else
729 #define map_fw_vendor(p, s)	__va(p)
730 #define unmap_fw_vendor(v, s)
731 #endif
732 
733 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
734 				     unsigned long fw_vendor)
735 {
736 	char vendor[100] = "unknown";
737 	const efi_char16_t *c16;
738 	size_t i;
739 
740 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
741 	if (c16) {
742 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
743 			vendor[i] = c16[i];
744 		vendor[i] = '\0';
745 
746 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
747 	}
748 
749 	pr_info("EFI v%u.%.02u by %s\n",
750 		systab_hdr->revision >> 16,
751 		systab_hdr->revision & 0xffff,
752 		vendor);
753 
754 	if (IS_ENABLED(CONFIG_X86_64) &&
755 	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
756 	    !strcmp(vendor, "Apple")) {
757 		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
758 		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
759 	}
760 }
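
/*
 * Example of the banner printed above (revision and vendor string are
 * platform specific):
 *
 *	efi: EFI v2.70 by EDK II
 */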
761 
762 static __initdata char memory_type_name[][13] = {
763 	"Reserved",
764 	"Loader Code",
765 	"Loader Data",
766 	"Boot Code",
767 	"Boot Data",
768 	"Runtime Code",
769 	"Runtime Data",
770 	"Conventional",
771 	"Unusable",
772 	"ACPI Reclaim",
773 	"ACPI Mem NVS",
774 	"MMIO",
775 	"MMIO Port",
776 	"PAL Code",
777 	"Persistent",
778 };
779 
780 char * __init efi_md_typeattr_format(char *buf, size_t size,
781 				     const efi_memory_desc_t *md)
782 {
783 	char *pos;
784 	int type_len;
785 	u64 attr;
786 
787 	pos = buf;
788 	if (md->type >= ARRAY_SIZE(memory_type_name))
789 		type_len = snprintf(pos, size, "[type=%u", md->type);
790 	else
791 		type_len = snprintf(pos, size, "[%-*s",
792 				    (int)(sizeof(memory_type_name[0]) - 1),
793 				    memory_type_name[md->type]);
794 	if (type_len >= size)
795 		return buf;
796 
797 	pos += type_len;
798 	size -= type_len;
799 
800 	attr = md->attribute;
801 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
802 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
803 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
804 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
805 		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
806 		snprintf(pos, size, "|attr=0x%016llx]",
807 			 (unsigned long long)attr);
808 	else
809 		snprintf(pos, size,
810 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
811 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
812 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
813 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
814 			 attr & EFI_MEMORY_SP			? "SP"  : "",
815 			 attr & EFI_MEMORY_NV			? "NV"  : "",
816 			 attr & EFI_MEMORY_XP			? "XP"  : "",
817 			 attr & EFI_MEMORY_RP			? "RP"  : "",
818 			 attr & EFI_MEMORY_WP			? "WP"  : "",
819 			 attr & EFI_MEMORY_RO			? "RO"  : "",
820 			 attr & EFI_MEMORY_UCE			? "UCE" : "",
821 			 attr & EFI_MEMORY_WB			? "WB"  : "",
822 			 attr & EFI_MEMORY_WT			? "WT"  : "",
823 			 attr & EFI_MEMORY_WC			? "WC"  : "",
824 			 attr & EFI_MEMORY_UC			? "UC"  : "");
825 	return buf;
826 }
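
/*
 * Example of the string this helper builds for a write-back-capable runtime
 * data region (spacing approximate, for illustration only):
 *
 *	[Runtime Data|RUN|  |  |  |  |  |  |  |  |   |WB|WT|WC|UC]
 */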
827 
828 /*
829  * IA64 has a funky EFI memory map that doesn't work the same way as
830  * other architectures.
831  */
832 #ifndef CONFIG_IA64
833 /*
834  * efi_mem_attributes - lookup memmap attributes for physical address
835  * @phys_addr: the physical address to lookup
836  *
837  * Search in the EFI memory map for the region covering
838  * @phys_addr. Returns the EFI memory attributes if the region
839  * was found in the memory map, 0 otherwise.
840  */
841 u64 efi_mem_attributes(unsigned long phys_addr)
842 {
843 	efi_memory_desc_t *md;
844 
845 	if (!efi_enabled(EFI_MEMMAP))
846 		return 0;
847 
848 	for_each_efi_memory_desc(md) {
849 		if ((md->phys_addr <= phys_addr) &&
850 		    (phys_addr < (md->phys_addr +
851 		    (md->num_pages << EFI_PAGE_SHIFT))))
852 			return md->attribute;
853 	}
854 	return 0;
855 }
856 
857 /*
858  * efi_mem_type - lookup memmap type for physical address
859  * @phys_addr: the physical address to lookup
860  *
861  * Search in the EFI memory map for the region covering @phys_addr.
862  * Returns the EFI memory type if the region was found in the memory
863  * map, -EINVAL otherwise.
864  */
865 int efi_mem_type(unsigned long phys_addr)
866 {
867 	const efi_memory_desc_t *md;
868 
869 	if (!efi_enabled(EFI_MEMMAP))
870 		return -ENOTSUPP;
871 
872 	for_each_efi_memory_desc(md) {
873 		if ((md->phys_addr <= phys_addr) &&
874 		    (phys_addr < (md->phys_addr +
875 				  (md->num_pages << EFI_PAGE_SHIFT))))
876 			return md->type;
877 	}
878 	return -EINVAL;
879 }
880 #endif
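
/*
 * Hedged usage sketch (hypothetical caller): the two lookups above can be
 * combined to pick mapping attributes for a firmware region, e.g.
 *
 *	if (efi_mem_type(paddr) == EFI_RUNTIME_SERVICES_DATA &&
 *	    (efi_mem_attributes(paddr) & EFI_MEMORY_WB))
 *		use a cacheable mapping;
 *	else
 *		fall back to an uncached mapping;
 */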
881 
882 int efi_status_to_err(efi_status_t status)
883 {
884 	int err;
885 
886 	switch (status) {
887 	case EFI_SUCCESS:
888 		err = 0;
889 		break;
890 	case EFI_INVALID_PARAMETER:
891 		err = -EINVAL;
892 		break;
893 	case EFI_OUT_OF_RESOURCES:
894 		err = -ENOSPC;
895 		break;
896 	case EFI_DEVICE_ERROR:
897 		err = -EIO;
898 		break;
899 	case EFI_WRITE_PROTECTED:
900 		err = -EROFS;
901 		break;
902 	case EFI_SECURITY_VIOLATION:
903 		err = -EACCES;
904 		break;
905 	case EFI_NOT_FOUND:
906 		err = -ENOENT;
907 		break;
908 	case EFI_ABORTED:
909 		err = -EINTR;
910 		break;
911 	default:
912 		err = -EINVAL;
913 	}
914 
915 	return err;
916 }
917 EXPORT_SYMBOL_GPL(efi_status_to_err);
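
/*
 * Minimal usage sketch (hypothetical caller): translate an EFI status into
 * an errno before returning it to the rest of the kernel.
 *
 *	status = efi.get_variable(name, &vendor, NULL, &data_size, data);
 *	if (status != EFI_SUCCESS)
 *		return efi_status_to_err(status);
 */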
918 
919 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
920 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
921 
922 static int __init efi_memreserve_map_root(void)
923 {
924 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
925 		return -ENODEV;
926 
927 	efi_memreserve_root = memremap(mem_reserve,
928 				       sizeof(*efi_memreserve_root),
929 				       MEMREMAP_WB);
930 	if (WARN_ON_ONCE(!efi_memreserve_root))
931 		return -ENOMEM;
932 	return 0;
933 }
934 
935 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
936 {
937 	struct resource *res, *parent;
938 	int ret;
939 
940 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
941 	if (!res)
942 		return -ENOMEM;
943 
944 	res->name	= "reserved";
945 	res->flags	= IORESOURCE_MEM;
946 	res->start	= addr;
947 	res->end	= addr + size - 1;
948 
949 	/* we expect a conflict with a 'System RAM' region */
950 	parent = request_resource_conflict(&iomem_resource, res);
951 	ret = parent ? request_resource(parent, res) : 0;
952 
953 	/*
954 	 * Given that efi_mem_reserve_iomem() can be called at any
955 	 * time, only call memblock_reserve() if the architecture
956 	 * keeps the infrastructure around.
957 	 */
958 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
959 		memblock_reserve(addr, size);
960 
961 	return ret;
962 }
963 
964 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
965 {
966 	struct linux_efi_memreserve *rsv;
967 	unsigned long prsv;
968 	int rc, index;
969 
970 	if (efi_memreserve_root == (void *)ULONG_MAX)
971 		return -ENODEV;
972 
973 	if (!efi_memreserve_root) {
974 		rc = efi_memreserve_map_root();
975 		if (rc)
976 			return rc;
977 	}
978 
979 	/* first try to find a slot in an existing linked list entry */
980 	for (prsv = efi_memreserve_root->next; prsv; ) {
981 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
982 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
983 		if (index < rsv->size) {
984 			rsv->entry[index].base = addr;
985 			rsv->entry[index].size = size;
986 
987 			memunmap(rsv);
988 			return efi_mem_reserve_iomem(addr, size);
989 		}
990 		prsv = rsv->next;
991 		memunmap(rsv);
992 	}
993 
994 	/* no slot found - allocate a new linked list entry */
995 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
996 	if (!rsv)
997 		return -ENOMEM;
998 
999 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1000 	if (rc) {
1001 		free_page((unsigned long)rsv);
1002 		return rc;
1003 	}
1004 
1005 	/*
1006 	 * The memremap() call above assumes that a linux_efi_memreserve entry
1007 	 * never crosses a page boundary, so let's ensure that this remains true
1008 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1009 	 * using SZ_4K explicitly in the size calculation below.
1010 	 */
1011 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1012 	atomic_set(&rsv->count, 1);
1013 	rsv->entry[0].base = addr;
1014 	rsv->entry[0].size = size;
1015 
1016 	spin_lock(&efi_mem_reserve_persistent_lock);
1017 	rsv->next = efi_memreserve_root->next;
1018 	efi_memreserve_root->next = __pa(rsv);
1019 	spin_unlock(&efi_mem_reserve_persistent_lock);
1020 
1021 	return efi_mem_reserve_iomem(addr, size);
1022 }
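
/*
 * Hedged usage sketch: code that needs a reservation to survive kexec
 * records it in the MEMRESERVE linked list via
 *
 *	efi_mem_reserve_persistent(addr, size);
 *
 * and the next kernel re-reserves every recorded entry while walking
 * mem_reserve in efi_config_parse_tables() above.
 */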
1023 
1024 static int __init efi_memreserve_root_init(void)
1025 {
1026 	if (efi_memreserve_root)
1027 		return 0;
1028 	if (efi_memreserve_map_root())
1029 		efi_memreserve_root = (void *)ULONG_MAX;
1030 	return 0;
1031 }
1032 early_initcall(efi_memreserve_root_init);
1033 
1034 #ifdef CONFIG_KEXEC
1035 static int update_efi_random_seed(struct notifier_block *nb,
1036 				  unsigned long code, void *unused)
1037 {
1038 	struct linux_efi_random_seed *seed;
1039 	u32 size = 0;
1040 
1041 	if (!kexec_in_progress)
1042 		return NOTIFY_DONE;
1043 
1044 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1045 	if (seed != NULL) {
1046 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1047 		memunmap(seed);
1048 	} else {
1049 		pr_err("Could not map UEFI random seed!\n");
1050 	}
1051 	if (size > 0) {
1052 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1053 				MEMREMAP_WB);
1054 		if (seed != NULL) {
1055 			seed->size = size;
1056 			get_random_bytes(seed->bits, seed->size);
1057 			memunmap(seed);
1058 		} else {
1059 			pr_err("Could not map UEFI random seed!\n");
1060 		}
1061 	}
1062 	return NOTIFY_DONE;
1063 }
1064 
1065 static struct notifier_block efi_random_seed_nb = {
1066 	.notifier_call = update_efi_random_seed,
1067 };
1068 
1069 static int __init register_update_efi_random_seed(void)
1070 {
1071 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1072 		return 0;
1073 	return register_reboot_notifier(&efi_random_seed_nb);
1074 }
1075 late_initcall(register_update_efi_random_seed);
1076 #endif
1077