xref: /openbmc/linux/arch/arm64/kernel/acpi.c (revision e368cd72)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  ARM64 Specific Low-Level ACPI Boot Support
 *
 *  Copyright (C) 2013-2014, Linaro Ltd.
 *	Author: Al Stone <al.stone@linaro.org>
 *	Author: Graeme Gregory <graeme.gregory@linaro.org>
 *	Author: Hanjun Guo <hanjun.guo@linaro.org>
 *	Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
 *	Author: Naresh Bhat <naresh.bhat@linaro.org>
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irq_work.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
#include <linux/pgtable.h>

#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/smp_plat.h>

int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;

static int __init parse_acpi(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* "acpi=off" disables both ACPI table parsing and the interpreter */
	if (strcmp(arg, "off") == 0)
		param_acpi_off = true;
	else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */
		param_acpi_on = true;
	else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
		param_acpi_force = true;
	else
		return -EINVAL;	/* core will print an error when we return one */

	return 0;
}
early_param("acpi", parse_acpi);
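
/*
 * Example: the accepted forms of the parameter on the kernel command line.
 *
 *	acpi=off	disable ACPI table parsing and the interpreter
 *	acpi=on		prefer ACPI over DT when the tables pass the checks
 *	acpi=force	enable ACPI even if table initialization fails
 */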

static int __init dt_scan_depth1_nodes(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	/*
	 * Ignore anything not directly under the root node; we'll
	 * catch its parent instead.
	 */
	if (depth != 1)
		return 0;

	if (strcmp(uname, "chosen") == 0)
		return 0;

	if (strcmp(uname, "hypervisor") == 0 &&
	    of_flat_dt_is_compatible(node, "xen,xen"))
		return 0;

	/*
	 * This node at depth 1 is neither a chosen node nor a xen node,
	 * which we do not expect.
	 */
	return 1;
}
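
/*
 * of_scan_flat_dt() stops at the first non-zero return, so a single
 * unexpected depth-1 node is enough to treat the DT as populated. A minimal
 * sketch of the call, mirroring acpi_boot_table_init() below:
 *
 *	if (of_scan_flat_dt(dt_scan_depth1_nodes, NULL))
 *		pr_info("DT is populated, staying with DT\n");
 */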

/*
 * __acpi_map_table() will be called before paging_init(), so early_ioremap()
 * or early_memremap() should be called here for ACPI table mapping.
 */
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}
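
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): every
 * successful __acpi_map_table() is paired with an __acpi_unmap_table() of the
 * same size once the table has been parsed.
 *
 *	struct acpi_table_header *hdr;
 *
 *	hdr = (struct acpi_table_header *)__acpi_map_table(phys, sizeof(*hdr));
 *	if (hdr) {
 *		pr_info("found table %4.4s\n", hdr->signature);
 *		__acpi_unmap_table((void __iomem *)hdr, sizeof(*hdr));
 *	}
 */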

bool __init acpi_psci_present(void)
{
	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
}

/* Whether HVC must be used instead of SMC as the PSCI conduit */
bool acpi_psci_use_hvc(void)
{
	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}
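
/*
 * Both flags live in the FADT "ARM Boot Architecture Flags" field. A sketch
 * (an assumed consumer, loosely modelled on the PSCI driver) of how the
 * conduit is chosen when ACPI rather than DT describes the boot protocol:
 *
 *	if (acpi_psci_present())
 *		conduit = acpi_psci_use_hvc() ? SMCCC_CONDUIT_HVC
 *					      : SMCCC_CONDUIT_SMC;
 */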

/*
 * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
 *			      checks on it
 *
 * Return 0 on success, <0 on failure
 */
static int __init acpi_fadt_sanity_check(void)
{
	struct acpi_table_header *table;
	struct acpi_table_fadt *fadt;
	acpi_status status;
	int ret = 0;

	/*
	 * FADT is required on arm64; retrieve it to check its presence
	 * and carry out revision and ACPI HW reduced compliance tests.
	 */
	status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
	if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err("Failed to get FADT table, %s\n", msg);
		return -ENODEV;
	}

	fadt = (struct acpi_table_fadt *)table;

	/*
	 * The revision in the table header is the FADT major revision; the
	 * minor revision field was introduced in ACPI 5.1. Only ACPI 5.1 or
	 * newer carries the GIC and SMP boot protocol configuration data we
	 * need, so reject anything older.
	 */
	if (table->revision < 5 ||
	   (table->revision == 5 && fadt->minor_revision < 1)) {
		pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
		       table->revision, fadt->minor_revision);

		if (!fadt->arm_boot_flags) {
			ret = -EINVAL;
			goto out;
		}
		pr_err("FADT has ARM boot flags set, assuming 5.1\n");
	}

	if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
		pr_err("FADT not ACPI hardware reduced compliant\n");
		ret = -EINVAL;
	}

out:
	/*
	 * acpi_get_table() creates a FADT table mapping that
	 * should be released after parsing and before resuming boot.
	 */
	acpi_put_table(table);
	return ret;
}
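
/*
 * Worked example of the revision gate above: FADT 4.0 and 5.0 are rejected
 * (major < 5, or major == 5 with minor < 1), 5.1 and 6.x pass, and a pre-5.1
 * table that nevertheless sets arm_boot_flags is accepted with an
 * "assuming 5.1" warning.
 */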

/*
 * acpi_boot_table_init() is called from setup_arch(), always. It:
 *	1. finds the RSDP and gets its address, then finds the XSDT
 *	2. extracts all tables and checksums them
 *	3. checks the ACPI FADT revision
 *	4. checks the ACPI FADT HW reduced flag
 *
 * We can parse ACPI boot-time tables such as the MADT after
 * this function is called.
 *
 * On return, ACPI is enabled if either:
 *
 * - the ACPI tables were initialized and the sanity checks passed, or
 * - acpi=force was passed on the command line and ACPI was not disabled
 *   explicitly through the acpi=off command line parameter.
 *
 * Otherwise ACPI is disabled on function return.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * Enable ACPI instead of device tree unless:
	 * - ACPI has been disabled explicitly (acpi=off), or
	 * - the device tree is not empty (it has more than just a /chosen
	 *   node, and a /hypervisor node when running on Xen)
	 *   and ACPI has not been [force] enabled (acpi=on|force)
	 */
	if (param_acpi_off ||
	    (!param_acpi_on && !param_acpi_force &&
	     of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
		goto done;

	/*
	 * ACPI is disabled at this point. Enable it in order to parse
	 * the ACPI tables and carry out the sanity checks.
	 */
	enable_acpi();

	/*
	 * If the ACPI tables were initialized and the FADT sanity checks
	 * passed, leave ACPI enabled and carry on booting; otherwise disable
	 * ACPI on initialization error.
	 * If acpi=force was passed on the command line, ACPI stays enabled
	 * even if its initialization failed.
	 */
	if (acpi_table_init() || acpi_fadt_sanity_check()) {
		pr_err("Failed to init ACPI tables\n");
		if (!param_acpi_force)
			disable_acpi();
	}

done:
	if (acpi_disabled) {
		if (earlycon_acpi_spcr_enable)
			early_init_dt_scan_chosen_stdout();
	} else {
		acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
		if (IS_ENABLED(CONFIG_ACPI_BGRT))
			acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
	}
}
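
/*
 * An illustrative sketch of "parsing boot-time tables after this function";
 * the handler name is hypothetical, and the real MADT walk happens later in
 * the SMP init path.
 *
 *	static int __init example_madt_handler(struct acpi_table_header *table)
 *	{
 *		pr_info("MADT revision %d\n", table->revision);
 *		return 0;
 *	}
 *
 *	// somewhere after acpi_boot_table_init():
 *	if (!acpi_disabled)
 *		acpi_table_parse(ACPI_SIG_MADT, example_madt_handler);
 */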

static pgprot_t __acpi_get_writethrough_mem_attribute(void)
{
	/*
	 * Although UEFI specifies the use of Normal Write-through for
	 * EFI_MEMORY_WT, it is seldom used in practice and not implemented
	 * by most (all?) CPUs. Rather than allocate a MAIR just for this
	 * purpose, emit a warning and use Normal Non-cacheable instead.
	 */
	pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
	return __pgprot(PROT_NORMAL_NC);
}

pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
	/*
	 * According to "Table 8 Map: EFI memory types to AArch64 memory
	 * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
	 * mapped to a corresponding MAIR attribute encoding.
	 * The EFI memory attribute advertises all possible capabilities
	 * of a memory region; we pick the most efficient one that is set.
	 */

	u64 attr;

	attr = efi_mem_attributes(addr);
	if (attr & EFI_MEMORY_WB)
		return PAGE_KERNEL;
	if (attr & EFI_MEMORY_WC)
		return __pgprot(PROT_NORMAL_NC);
	if (attr & EFI_MEMORY_WT)
		return __acpi_get_writethrough_mem_attribute();
	return __pgprot(PROT_DEVICE_nGnRnE);
}
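
/*
 * Illustrative use (mirrors what the remap path below does with regions
 * found in the EFI memory map): derive the pgprot for a physical address,
 * then map with it. "phys" and "size" are hypothetical.
 *
 *	void __iomem *p = __ioremap(phys, size, __acpi_get_mem_attribute(phys));
 */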

static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
				       acpi_size size, bool memory)
{
	efi_memory_desc_t *md, *region = NULL;
	pgprot_t prot;

	if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
		return NULL;

	for_each_efi_memory_desc(md) {
		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (phys < md->phys_addr || phys >= end)
			continue;

		if (phys + size > end) {
			pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
			return NULL;
		}
		region = md;
		break;
	}

	/*
	 * It is fine for AML to remap regions that are not represented in the
	 * EFI memory map at all, as it only describes normal memory, and MMIO
	 * regions that require a virtual mapping to make them accessible to
	 * the EFI runtime services. Determine the region default
	 * attributes by checking the requested memory semantics.
	 */
	prot = memory ? __pgprot(PROT_NORMAL_NC) :
			__pgprot(PROT_DEVICE_nGnRnE);
	if (region) {
		switch (region->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
		case EFI_PERSISTENT_MEMORY:
			if (memblock_is_map_memory(phys) ||
			    !memblock_is_region_memory(phys, size)) {
				pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
				return NULL;
			}
			/*
			 * Mapping kernel memory is permitted if the region in
			 * question is covered by a single memblock with the
			 * NOMAP attribute set: this enables the use of ACPI
			 * table overrides passed via initramfs, which are
			 * reserved in memory using arch_reserve_mem_area()
			 * below. As this particular use case only requires
			 * read access, fall through to the R/O mapping case.
			 */
			fallthrough;

		case EFI_RUNTIME_SERVICES_CODE:
			/*
			 * This would be unusual, but not problematic per se,
			 * as long as we take care not to create a writable
			 * mapping for executable code.
			 */
			prot = PAGE_KERNEL_RO;
			break;

		case EFI_ACPI_RECLAIM_MEMORY:
			/*
			 * ACPI reclaim memory is used to pass firmware tables
			 * and other data that is intended for consumption by
			 * the OS only, which may decide it wants to reclaim
			 * that memory and use it for something else. We never
			 * do that, but we usually add it to the linear map
			 * anyway, in which case we should use the existing
			 * mapping.
			 */
			if (memblock_is_map_memory(phys))
				return (void __iomem *)__phys_to_virt(phys);
			fallthrough;

		default:
			if (region->attribute & EFI_MEMORY_WB)
				prot = PAGE_KERNEL;
			else if (region->attribute & EFI_MEMORY_WC)
				prot = __pgprot(PROT_NORMAL_NC);
			else if (region->attribute & EFI_MEMORY_WT)
				prot = __acpi_get_writethrough_mem_attribute();
		}
	}
	return __ioremap(phys, size, prot);
}

void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	return __acpi_os_ioremap(phys, size, false);
}

void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
{
	return __acpi_os_ioremap(phys, size, true);
}
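
/*
 * The only difference between the two wrappers above is the default
 * attribute applied when the address is absent from the EFI memory map:
 * Device-nGnRnE for acpi_os_ioremap(), Normal Non-cacheable for
 * acpi_os_memmap(). Illustrative calls, addresses hypothetical:
 *
 *	void __iomem *mmio = acpi_os_ioremap(gas_address, 8);
 *	void __iomem *tbl  = acpi_os_memmap(table_phys, table_len);
 */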

/*
 * Claim Synchronous External Aborts as a firmware-first notification.
 *
 * Used by KVM and the arch do_sea handler.
 * @regs may be NULL when called from process context.
 */
int apei_claim_sea(struct pt_regs *regs)
{
	int err = -ENOENT;
	bool return_to_irqs_enabled;
	unsigned long current_flags;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return err;

	current_flags = local_daif_save_flags();

	/* current_flags isn't useful here as daif doesn't tell us about pNMI */
	return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());

	if (regs)
		return_to_irqs_enabled = interrupts_enabled(regs);

	/*
	 * SEA can interrupt SError, mask it and describe this as an NMI so
	 * that APEI defers the handling.
	 */
	local_daif_restore(DAIF_ERRCTX);
	nmi_enter();
	err = ghes_notify_sea();
	nmi_exit();

	/*
	 * APEI NMI-like notifications are deferred to irq_work. Unless
	 * we interrupted irqs-masked code, we can do that now.
	 */
	if (!err) {
		if (return_to_irqs_enabled) {
			local_daif_restore(DAIF_PROCCTX_NOIRQ);
			__irq_enter();
			irq_work_run();
			__irq_exit();
		} else {
			pr_warn_ratelimited("APEI work queued but not completed");
			err = -EINPROGRESS;
		}
	}

	local_daif_restore(current_flags);

	return err;
}
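
/*
 * A sketch of how a caller consumes the return value, loosely modelled on
 * the arch do_sea() path: 0 means the abort was claimed as a firmware-first
 * notification and the fault can be considered handled.
 *
 *	if (apei_claim_sea(regs) == 0)
 *		return 0;	// APEI claimed and handled the error
 */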

void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_mark_nomap(addr, size);
}
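
/*
 * Marking the override area NOMAP keeps it out of the linear map, which is
 * what lets __acpi_os_ioremap() above accept it for a read-only remap.
 * Illustrative call, address and size hypothetical:
 *
 *	arch_reserve_mem_area(tables_phys, tables_size);
 */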
434