xref: /openbmc/linux/drivers/firmware/efi/efi.c (revision 9114ba99)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/initrd.h>
25 #include <linux/io.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 
36 #include <asm/early_ioremap.h>
37 
38 struct efi __read_mostly efi = {
39 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
40 	.acpi			= EFI_INVALID_TABLE_ADDR,
41 	.acpi20			= EFI_INVALID_TABLE_ADDR,
42 	.smbios			= EFI_INVALID_TABLE_ADDR,
43 	.smbios3		= EFI_INVALID_TABLE_ADDR,
44 	.esrt			= EFI_INVALID_TABLE_ADDR,
45 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
46 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
47 #ifdef CONFIG_LOAD_UEFI_KEYS
48 	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
49 #endif
50 #ifdef CONFIG_EFI_COCO_SECRET
51 	.coco_secret		= EFI_INVALID_TABLE_ADDR,
52 #endif
53 #ifdef CONFIG_UNACCEPTED_MEMORY
54 	.unaccepted		= EFI_INVALID_TABLE_ADDR,
55 #endif
56 };
57 EXPORT_SYMBOL(efi);
58 
59 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
60 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
61 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
62 static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
63 
64 extern unsigned long screen_info_table;
65 
66 struct mm_struct efi_mm = {
67 	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
68 	.mm_users		= ATOMIC_INIT(2),
69 	.mm_count		= ATOMIC_INIT(1),
70 	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
71 	MMAP_LOCK_INITIALIZER(efi_mm)
72 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
73 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
74 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
75 };
76 
77 struct workqueue_struct *efi_rts_wq;
78 
79 static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
80 static int __init setup_noefi(char *arg)
81 {
82 	disable_runtime = true;
83 	return 0;
84 }
85 early_param("noefi", setup_noefi);
86 
87 bool efi_runtime_disabled(void)
88 {
89 	return disable_runtime;
90 }
91 
92 bool __pure __efi_soft_reserve_enabled(void)
93 {
94 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
95 }
96 
97 static int __init parse_efi_cmdline(char *str)
98 {
99 	if (!str) {
100 		pr_warn("need at least one option\n");
101 		return -EINVAL;
102 	}
103 
104 	if (parse_option_str(str, "debug"))
105 		set_bit(EFI_DBG, &efi.flags);
106 
107 	if (parse_option_str(str, "noruntime"))
108 		disable_runtime = true;
109 
110 	if (parse_option_str(str, "runtime"))
111 		disable_runtime = false;
112 
113 	if (parse_option_str(str, "nosoftreserve"))
114 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
115 
116 	return 0;
117 }
118 early_param("efi", parse_efi_cmdline);
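/*
 * A few example command lines, as handled by the two early_param() hooks
 * above (a sketch, not an exhaustive list):
 *
 *	noefi			- disable EFI runtime services
 *	efi=debug,nosoftreserve	- set EFI_DBG and skip soft-reserving
 *				  EFI_MEMORY_SP regions
 *	efi=runtime		- re-enable runtime services when the kernel
 *				  defaults to CONFIG_EFI_DISABLE_RUNTIME
 *
 * The "efi=" argument is a comma-separated option list, so the options above
 * can be combined freely.
 */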
119 
120 struct kobject *efi_kobj;
121 
122 /*
123  * Let's not leave out the systab information that snuck into
124  * the efivars driver.
125  * Note: do not add more fields to the systab sysfs file, as that breaks
126  * the sysfs "one value per file" rule!
127  */
128 static ssize_t systab_show(struct kobject *kobj,
129 			   struct kobj_attribute *attr, char *buf)
130 {
131 	char *str = buf;
132 
133 	if (!kobj || !buf)
134 		return -EINVAL;
135 
136 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
137 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
138 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
139 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
140 	/*
141 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
142 	 * SMBIOS3 entry point shall be preferred, so we list it first to
143 	 * let applications stop parsing after the first match.
144 	 */
145 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
146 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
147 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
148 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
149 
150 	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
151 		str = efi_systab_show_arch(str);
152 
153 	return str - buf;
154 }
155 
156 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
157 
158 static ssize_t fw_platform_size_show(struct kobject *kobj,
159 				     struct kobj_attribute *attr, char *buf)
160 {
161 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
162 }
163 
164 extern __weak struct kobj_attribute efi_attr_fw_vendor;
165 extern __weak struct kobj_attribute efi_attr_runtime;
166 extern __weak struct kobj_attribute efi_attr_config_table;
167 static struct kobj_attribute efi_attr_fw_platform_size =
168 	__ATTR_RO(fw_platform_size);
169 
170 static struct attribute *efi_subsys_attrs[] = {
171 	&efi_attr_systab.attr,
172 	&efi_attr_fw_platform_size.attr,
173 	&efi_attr_fw_vendor.attr,
174 	&efi_attr_runtime.attr,
175 	&efi_attr_config_table.attr,
176 	NULL,
177 };
178 
179 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
180 				   int n)
181 {
182 	return attr->mode;
183 }
184 
185 static const struct attribute_group efi_subsys_attr_group = {
186 	.attrs = efi_subsys_attrs,
187 	.is_visible = efi_attr_is_visible,
188 };
189 
190 static struct efivars generic_efivars;
191 static struct efivar_operations generic_ops;
192 
193 static bool generic_ops_supported(void)
194 {
195 	unsigned long name_size;
196 	efi_status_t status;
197 	efi_char16_t name;
198 	efi_guid_t guid;
199 
200 	name_size = sizeof(name);
201 
202 	if (!efi.get_next_variable)
203 		return false;
204 	status = efi.get_next_variable(&name_size, &name, &guid);
205 	if (status == EFI_UNSUPPORTED)
206 		return false;
207 
208 	return true;
209 }
210 
211 static int generic_ops_register(void)
212 {
213 	if (!generic_ops_supported())
214 		return 0;
215 
216 	generic_ops.get_variable = efi.get_variable;
217 	generic_ops.get_next_variable = efi.get_next_variable;
218 	generic_ops.query_variable_store = efi_query_variable_store;
219 	generic_ops.query_variable_info = efi.query_variable_info;
220 
221 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
222 		generic_ops.set_variable = efi.set_variable;
223 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
224 	}
225 	return efivars_register(&generic_efivars, &generic_ops);
226 }
227 
228 static void generic_ops_unregister(void)
229 {
230 	if (!generic_ops.get_variable)
231 		return;
232 
233 	efivars_unregister(&generic_efivars);
234 }
235 
236 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
237 #define EFIVAR_SSDT_NAME_MAX	16UL
238 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
239 static int __init efivar_ssdt_setup(char *str)
240 {
241 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
242 
243 	if (ret)
244 		return ret;
245 
246 	if (strlen(str) < sizeof(efivar_ssdt))
247 		memcpy(efivar_ssdt, str, strlen(str));
248 	else
249 		pr_warn("efivar_ssdt: name too long: %s\n", str);
250 	return 1;
251 }
252 __setup("efivar_ssdt=", efivar_ssdt_setup);
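/*
 * Example (the variable name is made up): booting with "efivar_ssdt=SSDT1"
 * makes efivar_ssdt_load() below walk all EFI variables, pick the one whose
 * UTF-8 name is "SSDT1" (regardless of vendor GUID), and hand its contents
 * to acpi_load_table().
 */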
253 
254 static __init int efivar_ssdt_load(void)
255 {
256 	unsigned long name_size = 256;
257 	efi_char16_t *name = NULL;
258 	efi_status_t status;
259 	efi_guid_t guid;
260 
261 	if (!efivar_ssdt[0])
262 		return 0;
263 
264 	name = kzalloc(name_size, GFP_KERNEL);
265 	if (!name)
266 		return -ENOMEM;
267 
268 	for (;;) {
269 		char utf8_name[EFIVAR_SSDT_NAME_MAX];
270 		unsigned long data_size = 0;
271 		void *data;
272 		int limit;
273 
274 		status = efi.get_next_variable(&name_size, name, &guid);
275 		if (status == EFI_NOT_FOUND) {
276 			break;
277 		} else if (status == EFI_BUFFER_TOO_SMALL) {
278 			efi_char16_t *name_tmp =
279 				krealloc(name, name_size, GFP_KERNEL);
280 			if (!name_tmp) {
281 				kfree(name);
282 				return -ENOMEM;
283 			}
284 			name = name_tmp;
285 			continue;
286 		}
287 
288 		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
289 		ucs2_as_utf8(utf8_name, name, limit - 1);
290 		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
291 			continue;
292 
293 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
294 
295 		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
296 		if (status != EFI_BUFFER_TOO_SMALL || !data_size)
297 			return -EIO;
298 
299 		data = kmalloc(data_size, GFP_KERNEL);
300 		if (!data)
301 			return -ENOMEM;
302 
303 		status = efi.get_variable(name, &guid, NULL, &data_size, data);
304 		if (status == EFI_SUCCESS) {
305 			acpi_status ret = acpi_load_table(data, NULL);
306 			if (ret)
307 				pr_err("failed to load table: %u\n", ret);
308 			else
309 				continue;
310 		} else {
311 			pr_err("failed to get var data: 0x%lx\n", status);
312 		}
313 		kfree(data);
314 	}
315 	return 0;
316 }
317 #else
318 static inline int efivar_ssdt_load(void) { return 0; }
319 #endif
320 
321 #ifdef CONFIG_DEBUG_FS
322 
323 #define EFI_DEBUGFS_MAX_BLOBS 32
324 
325 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
326 
327 static void __init efi_debugfs_init(void)
328 {
329 	struct dentry *efi_debugfs;
330 	efi_memory_desc_t *md;
331 	char name[32];
332 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
333 	int i = 0;
334 
335 	efi_debugfs = debugfs_create_dir("efi", NULL);
336 	if (IS_ERR_OR_NULL(efi_debugfs))
337 		return;
338 
339 	for_each_efi_memory_desc(md) {
340 		switch (md->type) {
341 		case EFI_BOOT_SERVICES_CODE:
342 			snprintf(name, sizeof(name), "boot_services_code%d",
343 				 type_count[md->type]++);
344 			break;
345 		case EFI_BOOT_SERVICES_DATA:
346 			snprintf(name, sizeof(name), "boot_services_data%d",
347 				 type_count[md->type]++);
348 			break;
349 		default:
350 			continue;
351 		}
352 
353 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
354 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
355 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
356 			break;
357 		}
358 
359 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
360 		debugfs_blob[i].data = memremap(md->phys_addr,
361 						debugfs_blob[i].size,
362 						MEMREMAP_WB);
363 		if (!debugfs_blob[i].data)
364 			continue;
365 
366 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
367 		i++;
368 	}
369 }
370 #else
371 static inline void efi_debugfs_init(void) {}
372 #endif
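/*
 * When CONFIG_DEBUG_FS is enabled and the boot services regions were
 * preserved, the directory created above shows up as, for example:
 *
 *	/sys/kernel/debug/efi/boot_services_code0
 *	/sys/kernel/debug/efi/boot_services_data0
 *	/sys/kernel/debug/efi/boot_services_data1
 *
 * i.e. one read-only blob per preserved boot services region, capped at
 * EFI_DEBUGFS_MAX_BLOBS entries.
 */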
373 
374 /*
375  * We register the efi subsystem with the firmware subsystem and the
376  * efivars subsystem with the efi subsystem, if the system was booted with
377  * EFI.
378  */
379 static int __init efisubsys_init(void)
380 {
381 	int error;
382 
383 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
384 		efi.runtime_supported_mask = 0;
385 
386 	if (!efi_enabled(EFI_BOOT))
387 		return 0;
388 
389 	if (efi.runtime_supported_mask) {
390 		/*
391 		 * Since we process only one efi_runtime_service() at a time, an
392 		 * ordered workqueue (which creates only one execution context)
393 		 * should suffice for all our needs.
394 		 */
395 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
396 		if (!efi_rts_wq) {
397 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
398 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
399 			efi.runtime_supported_mask = 0;
400 			return 0;
401 		}
402 	}
403 
404 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
405 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
406 
407 	/* We register the efi directory at /sys/firmware/efi */
408 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
409 	if (!efi_kobj) {
410 		pr_err("efi: Firmware registration failed.\n");
411 		error = -ENOMEM;
412 		goto err_destroy_wq;
413 	}
414 
415 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
416 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
417 		error = generic_ops_register();
418 		if (error)
419 			goto err_put;
420 		efivar_ssdt_load();
421 		platform_device_register_simple("efivars", 0, NULL, 0);
422 	}
423 
424 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
425 	if (error) {
426 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
427 		       error);
428 		goto err_unregister;
429 	}
430 
431 	/* and the standard mountpoint for efivarfs */
432 	error = sysfs_create_mount_point(efi_kobj, "efivars");
433 	if (error) {
434 		pr_err("efivars: Subsystem registration failed.\n");
435 		goto err_remove_group;
436 	}
437 
438 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
439 		efi_debugfs_init();
440 
441 #ifdef CONFIG_EFI_COCO_SECRET
442 	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
443 		platform_device_register_simple("efi_secret", 0, NULL, 0);
444 #endif
445 
446 	return 0;
447 
448 err_remove_group:
449 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
450 err_unregister:
451 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
452 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
453 		generic_ops_unregister();
454 err_put:
455 	kobject_put(efi_kobj);
456 	efi_kobj = NULL;
457 err_destroy_wq:
458 	if (efi_rts_wq)
459 		destroy_workqueue(efi_rts_wq);
460 
461 	return error;
462 }
463 
464 subsys_initcall(efisubsys_init);
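/*
 * Rough sketch of what efisubsys_init() ends up creating on an EFI-booted
 * system (which attributes are visible depends on the architecture):
 *
 *	/sys/firmware/efi/systab
 *	/sys/firmware/efi/fw_platform_size
 *	/sys/firmware/efi/{fw_vendor,runtime,config_table}
 *	/sys/firmware/efi/efivars		(efivarfs mount point)
 *
 * plus the "rtc-efi" and "efivars" platform devices when the corresponding
 * runtime services are advertised as supported.
 */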
465 
466 void __init efi_find_mirror(void)
467 {
468 	efi_memory_desc_t *md;
469 	u64 mirror_size = 0, total_size = 0;
470 
471 	if (!efi_enabled(EFI_MEMMAP))
472 		return;
473 
474 	for_each_efi_memory_desc(md) {
475 		unsigned long long start = md->phys_addr;
476 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
477 
478 		total_size += size;
479 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
480 			memblock_mark_mirror(start, size);
481 			mirror_size += size;
482 		}
483 	}
484 	if (mirror_size)
485 		pr_info("Memory: %lldM/%lldM mirrored memory\n",
486 			mirror_size>>20, total_size>>20);
487 }
488 
489 /*
490  * Find the EFI memory descriptor for a given physical address.  Determine
491  * whether the address lies within an EFI memory map entry, and if so,
492  * populate the supplied memory descriptor with that entry's
493  * data.
494  */
495 int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
496 {
497 	efi_memory_desc_t *md;
498 
499 	if (!efi_enabled(EFI_MEMMAP)) {
500 		pr_err_once("EFI_MEMMAP is not enabled.\n");
501 		return -EINVAL;
502 	}
503 
504 	if (!out_md) {
505 		pr_err_once("out_md is null.\n");
506 		return -EINVAL;
507 	}
508 
509 	for_each_efi_memory_desc(md) {
510 		u64 size;
511 		u64 end;
512 
513 		/* skip bogus entries (including empty ones) */
514 		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
515 		    (md->num_pages <= 0) ||
516 		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
517 			continue;
518 
519 		size = md->num_pages << EFI_PAGE_SHIFT;
520 		end = md->phys_addr + size;
521 		if (phys_addr >= md->phys_addr && phys_addr < end) {
522 			memcpy(out_md, md, sizeof(*out_md));
523 			return 0;
524 		}
525 	}
526 	return -ENOENT;
527 }
528 
529 extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
530 	__weak __alias(__efi_mem_desc_lookup);
531 
532 /*
533  * Calculate the highest address of an efi memory descriptor.
534  */
535 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
536 {
537 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
538 	u64 end = md->phys_addr + size;
539 	return end;
540 }
541 
542 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
543 
544 /**
545  * efi_mem_reserve - Reserve an EFI memory region
546  * @addr: Physical address to reserve
547  * @size: Size of reservation
548  *
549  * Mark a region as reserved from general kernel allocation and
550  * prevent it being released by efi_free_boot_services().
551  *
552  * This function should be called by drivers once they've parsed EFI
553  * configuration tables to figure out where their data lives, e.g.
554  * efi_esrt_init().
555  */
556 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
557 {
558 	/* efi_mem_reserve() does not work under Xen */
559 	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
560 		return;
561 
562 	if (!memblock_is_region_reserved(addr, size))
563 		memblock_reserve(addr, size);
564 
565 	/*
566 	 * Some architectures (x86) reserve all boot services ranges
567 	 * until efi_free_boot_services() because of buggy firmware
568 	 * implementations. This means the above memblock_reserve() is
569 	 * superfluous on x86; what this call needs to do instead is
570 	 * ensure that the region described by @addr and @size is not freed.
571 	 */
572 	efi_arch_mem_reserve(addr, size);
573 }
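/*
 * Typical caller pattern (a sketch only; table_phys and table_size stand in
 * for a driver's own values):
 *
 *	if (table_phys != EFI_INVALID_TABLE_ADDR)
 *		efi_mem_reserve(table_phys, table_size);
 *
 * i.e. a driver that located its data via an EFI configuration table keeps
 * the backing region from being handed back to the page allocator by
 * efi_free_boot_services(), as efi_esrt_init() does for the ESRT.
 */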
574 
575 static const efi_config_table_type_t common_tables[] __initconst = {
576 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
577 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
578 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
579 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
580 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
581 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
582 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
583 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
584 	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
585 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
586 	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
587 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
588 #ifdef CONFIG_EFI_RCI2_TABLE
589 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
590 #endif
591 #ifdef CONFIG_LOAD_UEFI_KEYS
592 	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
593 #endif
594 #ifdef CONFIG_EFI_COCO_SECRET
595 	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
596 #endif
597 #ifdef CONFIG_UNACCEPTED_MEMORY
598 	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
599 #endif
600 #ifdef CONFIG_EFI_GENERIC_STUB
601 	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table			},
602 #endif
603 	{},
604 };
605 
606 static __init int match_config_table(const efi_guid_t *guid,
607 				     unsigned long table,
608 				     const efi_config_table_type_t *table_types)
609 {
610 	int i;
611 
612 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
613 		if (efi_guidcmp(*guid, table_types[i].guid))
614 			continue;
615 
616 		if (!efi_config_table_is_usable(guid, table)) {
617 			if (table_types[i].name[0])
618 				pr_cont("(%s=0x%lx unusable) ",
619 					table_types[i].name, table);
620 			return 1;
621 		}
622 
623 		*(table_types[i].ptr) = table;
624 		if (table_types[i].name[0])
625 			pr_cont("%s=0x%lx ", table_types[i].name, table);
626 		return 1;
627 	}
628 
629 	return 0;
630 }
631 
632 /**
633  * reserve_unaccepted - Map and reserve unaccepted configuration table
634  * @unaccepted: Pointer to unaccepted memory table
635  *
636  * memblock_add() makes sure that the table is covered by the direct mapping.
637  * During a normal boot this happens automatically because the table is
638  * allocated from usable memory. But during a crashkernel boot only memory
639  * specifically reserved for the crash scenario is mapped, so memblock_add()
640  * forces the table to be mapped in that case.
641  *
642  * Align the range to the nearest page borders. Ranges smaller than page size
643  * are not going to be mapped.
644  *
645  * memblock_reserve() makes sure that future allocations will not touch the
646  * table.
647  */
648 
649 static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
650 {
651 	phys_addr_t start, size;
652 
653 	start = PAGE_ALIGN_DOWN(efi.unaccepted);
654 	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
655 
656 	memblock_add(start, size);
657 	memblock_reserve(start, size);
658 }
659 
660 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
661 				   int count,
662 				   const efi_config_table_type_t *arch_tables)
663 {
664 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
665 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
666 	const efi_guid_t *guid;
667 	unsigned long table;
668 	int i;
669 
670 	pr_info("");
671 	for (i = 0; i < count; i++) {
672 		if (!IS_ENABLED(CONFIG_X86)) {
673 			guid = &config_tables[i].guid;
674 			table = (unsigned long)config_tables[i].table;
675 		} else if (efi_enabled(EFI_64BIT)) {
676 			guid = &tbl64[i].guid;
677 			table = tbl64[i].table;
678 
679 			if (IS_ENABLED(CONFIG_X86_32) &&
680 			    tbl64[i].table > U32_MAX) {
681 				pr_cont("\n");
682 				pr_err("Table located above 4GB, disabling EFI.\n");
683 				return -EINVAL;
684 			}
685 		} else {
686 			guid = &tbl32[i].guid;
687 			table = tbl32[i].table;
688 		}
689 
690 		if (!match_config_table(guid, table, common_tables) && arch_tables)
691 			match_config_table(guid, table, arch_tables);
692 	}
693 	pr_cont("\n");
694 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
695 
696 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
697 		struct linux_efi_random_seed *seed;
698 		u32 size = 0;
699 
700 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
701 		if (seed != NULL) {
702 			size = min_t(u32, seed->size, SZ_1K); // sanity check
703 			early_memunmap(seed, sizeof(*seed));
704 		} else {
705 			pr_err("Could not map UEFI random seed!\n");
706 		}
707 		if (size > 0) {
708 			seed = early_memremap(efi_rng_seed,
709 					      sizeof(*seed) + size);
710 			if (seed != NULL) {
711 				add_bootloader_randomness(seed->bits, size);
712 				memzero_explicit(seed->bits, size);
713 				early_memunmap(seed, sizeof(*seed) + size);
714 			} else {
715 				pr_err("Could not map UEFI random seed!\n");
716 			}
717 		}
718 	}
719 
720 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
721 		efi_memattr_init();
722 
723 	efi_tpm_eventlog_init();
724 
725 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
726 		unsigned long prsv = mem_reserve;
727 
728 		while (prsv) {
729 			struct linux_efi_memreserve *rsv;
730 			u8 *p;
731 
732 			/*
733 			 * Just map a full page: that is what we will get
734 			 * anyway, and it permits us to map the entire entry
735 			 * before knowing its size.
736 			 */
737 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
738 					   PAGE_SIZE);
739 			if (p == NULL) {
740 				pr_err("Could not map UEFI memreserve entry!\n");
741 				return -ENOMEM;
742 			}
743 
744 			rsv = (void *)(p + prsv % PAGE_SIZE);
745 
746 			/* reserve the entry itself */
747 			memblock_reserve(prsv,
748 					 struct_size(rsv, entry, rsv->size));
749 
750 			for (i = 0; i < atomic_read(&rsv->count); i++) {
751 				memblock_reserve(rsv->entry[i].base,
752 						 rsv->entry[i].size);
753 			}
754 
755 			prsv = rsv->next;
756 			early_memunmap(p, PAGE_SIZE);
757 		}
758 	}
759 
760 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
761 		efi_rt_properties_table_t *tbl;
762 
763 		tbl = early_memremap(rt_prop, sizeof(*tbl));
764 		if (tbl) {
765 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
766 			early_memunmap(tbl, sizeof(*tbl));
767 		}
768 	}
769 
770 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
771 	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
772 		struct linux_efi_initrd *tbl;
773 
774 		tbl = early_memremap(initrd, sizeof(*tbl));
775 		if (tbl) {
776 			phys_initrd_start = tbl->base;
777 			phys_initrd_size = tbl->size;
778 			early_memunmap(tbl, sizeof(*tbl));
779 		}
780 	}
781 
782 	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
783 	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
784 		struct efi_unaccepted_memory *unaccepted;
785 
786 		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
787 		if (unaccepted) {
788 
789 			if (unaccepted->version == 1) {
790 				reserve_unaccepted(unaccepted);
791 			} else {
792 				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
793 			}
794 
795 			early_memunmap(unaccepted, sizeof(*unaccepted));
796 		}
797 	}
798 
799 	return 0;
800 }
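/*
 * The pr_info("")/pr_cont() sequence above emits a single boot log line of
 * the form (addresses below are made up):
 *
 *	efi: ACPI 2.0=0x7ff80000 SMBIOS=0x7ff7e000 ESRT=0x7ff6f000 RNG=0x7ff6e018
 *
 * listing every recognised configuration table and where it lives.
 */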
801 
802 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
803 {
804 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
805 		pr_err("System table signature incorrect!\n");
806 		return -EINVAL;
807 	}
808 
809 	return 0;
810 }
811 
812 #ifndef CONFIG_IA64
813 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
814 						size_t size)
815 {
816 	const efi_char16_t *ret;
817 
818 	ret = early_memremap_ro(fw_vendor, size);
819 	if (!ret)
820 		pr_err("Could not map the firmware vendor!\n");
821 	return ret;
822 }
823 
824 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
825 {
826 	early_memunmap((void *)fw_vendor, size);
827 }
828 #else
829 #define map_fw_vendor(p, s)	__va(p)
830 #define unmap_fw_vendor(v, s)
831 #endif
832 
833 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
834 				     unsigned long fw_vendor)
835 {
836 	char vendor[100] = "unknown";
837 	const efi_char16_t *c16;
838 	size_t i;
839 	u16 rev;
840 
841 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
842 	if (c16) {
843 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
844 			vendor[i] = c16[i];
845 		vendor[i] = '\0';
846 
847 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
848 	}
849 
850 	rev = (u16)systab_hdr->revision;
851 	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
852 
853 	rev %= 10;
854 	if (rev)
855 		pr_cont(".%u", rev);
856 
857 	pr_cont(" by %s\n", vendor);
858 
859 	if (IS_ENABLED(CONFIG_X86_64) &&
860 	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
861 	    !strcmp(vendor, "Apple")) {
862 		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
863 		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
864 	}
865 }
866 
867 static __initdata char memory_type_name[][13] = {
868 	"Reserved",
869 	"Loader Code",
870 	"Loader Data",
871 	"Boot Code",
872 	"Boot Data",
873 	"Runtime Code",
874 	"Runtime Data",
875 	"Conventional",
876 	"Unusable",
877 	"ACPI Reclaim",
878 	"ACPI Mem NVS",
879 	"MMIO",
880 	"MMIO Port",
881 	"PAL Code",
882 	"Persistent",
883 	"Unaccepted",
884 };
885 
886 char * __init efi_md_typeattr_format(char *buf, size_t size,
887 				     const efi_memory_desc_t *md)
888 {
889 	char *pos;
890 	int type_len;
891 	u64 attr;
892 
893 	pos = buf;
894 	if (md->type >= ARRAY_SIZE(memory_type_name))
895 		type_len = snprintf(pos, size, "[type=%u", md->type);
896 	else
897 		type_len = snprintf(pos, size, "[%-*s",
898 				    (int)(sizeof(memory_type_name[0]) - 1),
899 				    memory_type_name[md->type]);
900 	if (type_len >= size)
901 		return buf;
902 
903 	pos += type_len;
904 	size -= type_len;
905 
906 	attr = md->attribute;
907 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
908 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
909 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
910 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
911 		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
912 		snprintf(pos, size, "|attr=0x%016llx]",
913 			 (unsigned long long)attr);
914 	else
915 		snprintf(pos, size,
916 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
917 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
918 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
919 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
920 			 attr & EFI_MEMORY_SP			? "SP"  : "",
921 			 attr & EFI_MEMORY_NV			? "NV"  : "",
922 			 attr & EFI_MEMORY_XP			? "XP"  : "",
923 			 attr & EFI_MEMORY_RP			? "RP"  : "",
924 			 attr & EFI_MEMORY_WP			? "WP"  : "",
925 			 attr & EFI_MEMORY_RO			? "RO"  : "",
926 			 attr & EFI_MEMORY_UCE			? "UCE" : "",
927 			 attr & EFI_MEMORY_WB			? "WB"  : "",
928 			 attr & EFI_MEMORY_WT			? "WT"  : "",
929 			 attr & EFI_MEMORY_WC			? "WC"  : "",
930 			 attr & EFI_MEMORY_UC			? "UC"  : "");
931 	return buf;
932 }
933 
934 /*
935  * IA64 has a funky EFI memory map that doesn't work the same way as
936  * other architectures.
937  */
938 #ifndef CONFIG_IA64
939 /*
940  * efi_mem_attributes - lookup memmap attributes for physical address
941  * @phys_addr: the physical address to lookup
942  *
943  * Search in the EFI memory map for the region covering
944  * @phys_addr. Returns the EFI memory attributes if the region
945  * was found in the memory map, 0 otherwise.
946  */
947 u64 efi_mem_attributes(unsigned long phys_addr)
948 {
949 	efi_memory_desc_t *md;
950 
951 	if (!efi_enabled(EFI_MEMMAP))
952 		return 0;
953 
954 	for_each_efi_memory_desc(md) {
955 		if ((md->phys_addr <= phys_addr) &&
956 		    (phys_addr < (md->phys_addr +
957 		    (md->num_pages << EFI_PAGE_SHIFT))))
958 			return md->attribute;
959 	}
960 	return 0;
961 }
962 
963 /*
964  * efi_mem_type - lookup memmap type for physical address
965  * @phys_addr: the physical address to lookup
966  *
967  * Search in the EFI memory map for the region covering @phys_addr.
968  * Returns the EFI memory type if the region was found in the memory
969  * map, -EINVAL otherwise.
970  */
971 int efi_mem_type(unsigned long phys_addr)
972 {
973 	const efi_memory_desc_t *md;
974 
975 	if (!efi_enabled(EFI_MEMMAP))
976 		return -ENOTSUPP;
977 
978 	for_each_efi_memory_desc(md) {
979 		if ((md->phys_addr <= phys_addr) &&
980 		    (phys_addr < (md->phys_addr +
981 				  (md->num_pages << EFI_PAGE_SHIFT))))
982 			return md->type;
983 	}
984 	return -EINVAL;
985 }
986 #endif
987 
988 int efi_status_to_err(efi_status_t status)
989 {
990 	int err;
991 
992 	switch (status) {
993 	case EFI_SUCCESS:
994 		err = 0;
995 		break;
996 	case EFI_INVALID_PARAMETER:
997 		err = -EINVAL;
998 		break;
999 	case EFI_OUT_OF_RESOURCES:
1000 		err = -ENOSPC;
1001 		break;
1002 	case EFI_DEVICE_ERROR:
1003 		err = -EIO;
1004 		break;
1005 	case EFI_WRITE_PROTECTED:
1006 		err = -EROFS;
1007 		break;
1008 	case EFI_SECURITY_VIOLATION:
1009 		err = -EACCES;
1010 		break;
1011 	case EFI_NOT_FOUND:
1012 		err = -ENOENT;
1013 		break;
1014 	case EFI_ABORTED:
1015 		err = -EINTR;
1016 		break;
1017 	default:
1018 		err = -EINVAL;
1019 	}
1020 
1021 	return err;
1022 }
1023 EXPORT_SYMBOL_GPL(efi_status_to_err);
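/*
 * Example use (a sketch): callers of the runtime services typically convert
 * the firmware status into an errno right at the call site, e.g.
 *
 *	status = efi.get_variable(name, &guid, NULL, &data_size, data);
 *	if (status != EFI_SUCCESS)
 *		return efi_status_to_err(status);
 */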
1024 
1025 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
1026 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
1027 
1028 static int __init efi_memreserve_map_root(void)
1029 {
1030 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
1031 		return -ENODEV;
1032 
1033 	efi_memreserve_root = memremap(mem_reserve,
1034 				       sizeof(*efi_memreserve_root),
1035 				       MEMREMAP_WB);
1036 	if (WARN_ON_ONCE(!efi_memreserve_root))
1037 		return -ENOMEM;
1038 	return 0;
1039 }
1040 
1041 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
1042 {
1043 	struct resource *res, *parent;
1044 	int ret;
1045 
1046 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1047 	if (!res)
1048 		return -ENOMEM;
1049 
1050 	res->name	= "reserved";
1051 	res->flags	= IORESOURCE_MEM;
1052 	res->start	= addr;
1053 	res->end	= addr + size - 1;
1054 
1055 	/* we expect a conflict with a 'System RAM' region */
1056 	parent = request_resource_conflict(&iomem_resource, res);
1057 	ret = parent ? request_resource(parent, res) : 0;
1058 
1059 	/*
1060 	 * Given that efi_mem_reserve_iomem() can be called at any
1061 	 * time, only call memblock_reserve() if the architecture
1062 	 * keeps the infrastructure around.
1063 	 */
1064 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
1065 		memblock_reserve(addr, size);
1066 
1067 	return ret;
1068 }
1069 
1070 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1071 {
1072 	struct linux_efi_memreserve *rsv;
1073 	unsigned long prsv;
1074 	int rc, index;
1075 
1076 	if (efi_memreserve_root == (void *)ULONG_MAX)
1077 		return -ENODEV;
1078 
1079 	if (!efi_memreserve_root) {
1080 		rc = efi_memreserve_map_root();
1081 		if (rc)
1082 			return rc;
1083 	}
1084 
1085 	/* first try to find a slot in an existing linked list entry */
1086 	for (prsv = efi_memreserve_root->next; prsv; ) {
1087 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1088 		if (!rsv)
1089 			return -ENOMEM;
1090 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1091 		if (index < rsv->size) {
1092 			rsv->entry[index].base = addr;
1093 			rsv->entry[index].size = size;
1094 
1095 			memunmap(rsv);
1096 			return efi_mem_reserve_iomem(addr, size);
1097 		}
1098 		prsv = rsv->next;
1099 		memunmap(rsv);
1100 	}
1101 
1102 	/* no slot found - allocate a new linked list entry */
1103 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
1104 	if (!rsv)
1105 		return -ENOMEM;
1106 
1107 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1108 	if (rc) {
1109 		free_page((unsigned long)rsv);
1110 		return rc;
1111 	}
1112 
1113 	/*
1114 	 * The memremap() call above assumes that a linux_efi_memreserve entry
1115 	 * never crosses a page boundary, so let's ensure that this remains true
1116 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1117 	 * using SZ_4K explicitly in the size calculation below.
1118 	 */
1119 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1120 	atomic_set(&rsv->count, 1);
1121 	rsv->entry[0].base = addr;
1122 	rsv->entry[0].size = size;
1123 
1124 	spin_lock(&efi_mem_reserve_persistent_lock);
1125 	rsv->next = efi_memreserve_root->next;
1126 	efi_memreserve_root->next = __pa(rsv);
1127 	spin_unlock(&efi_mem_reserve_persistent_lock);
1128 
1129 	return efi_mem_reserve_iomem(addr, size);
1130 }
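/*
 * Example (a sketch; table_pa and table_size are placeholders): a driver
 * whose tables must survive kexec records them in the persistent list:
 *
 *	ret = efi_mem_reserve_persistent(table_pa, table_size);
 *	if (ret)
 *		pr_warn("failed to add persistent memreserve entry\n");
 *
 * The GICv3 ITS driver, for instance, uses this to keep its LPI tables
 * reserved across kexec.
 */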
1131 
1132 static int __init efi_memreserve_root_init(void)
1133 {
1134 	if (efi_memreserve_root)
1135 		return 0;
1136 	if (efi_memreserve_map_root())
1137 		efi_memreserve_root = (void *)ULONG_MAX;
1138 	return 0;
1139 }
1140 early_initcall(efi_memreserve_root_init);
1141 
1142 #ifdef CONFIG_KEXEC
1143 static int update_efi_random_seed(struct notifier_block *nb,
1144 				  unsigned long code, void *unused)
1145 {
1146 	struct linux_efi_random_seed *seed;
1147 	u32 size = 0;
1148 
1149 	if (!kexec_in_progress)
1150 		return NOTIFY_DONE;
1151 
1152 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1153 	if (seed != NULL) {
1154 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1155 		memunmap(seed);
1156 	} else {
1157 		pr_err("Could not map UEFI random seed!\n");
1158 	}
1159 	if (size > 0) {
1160 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1161 				MEMREMAP_WB);
1162 		if (seed != NULL) {
1163 			seed->size = size;
1164 			get_random_bytes(seed->bits, seed->size);
1165 			memunmap(seed);
1166 		} else {
1167 			pr_err("Could not map UEFI random seed!\n");
1168 		}
1169 	}
1170 	return NOTIFY_DONE;
1171 }
1172 
1173 static struct notifier_block efi_random_seed_nb = {
1174 	.notifier_call = update_efi_random_seed,
1175 };
1176 
1177 static int __init register_update_efi_random_seed(void)
1178 {
1179 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1180 		return 0;
1181 	return register_reboot_notifier(&efi_random_seed_nb);
1182 }
1183 late_initcall(register_update_efi_random_seed);
1184 #endif
1185