xref: /openbmc/linux/arch/arm64/kernel/acpi.c (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  ARM64 Specific Low-Level ACPI Boot Support
 *
 *  Copyright (C) 2013-2014, Linaro Ltd.
 *	Author: Al Stone <al.stone@linaro.org>
 *	Author: Graeme Gregory <graeme.gregory@linaro.org>
 *	Author: Hanjun Guo <hanjun.guo@linaro.org>
 *	Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
 *	Author: Naresh Bhat <naresh.bhat@linaro.org>
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irq_work.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
#include <linux/pgtable.h>

#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/smp_plat.h>

int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;

static int __init parse_acpi(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* "acpi=off" disables both ACPI table parsing and interpreter */
	if (strcmp(arg, "off") == 0)
		param_acpi_off = true;
	else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */
		param_acpi_on = true;
	else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
		param_acpi_force = true;
	else
		return -EINVAL;	/* Core will print when we return error */

	return 0;
}
early_param("acpi", parse_acpi);

static bool __init dt_is_stub(void)
{
	int node;

	fdt_for_each_subnode(node, initial_boot_params, 0) {
		const char *name = fdt_get_name(initial_boot_params, node, NULL);
		if (strcmp(name, "chosen") == 0)
			continue;
		if (strcmp(name, "hypervisor") == 0 &&
		    of_flat_dt_is_compatible(node, "xen,xen"))
			continue;

		return false;
	}

	return true;
}

/*
 * __acpi_map_table() will be called before paging_init(), so early_ioremap()
 * or early_memremap() should be used here for ACPI table mapping.
 */
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

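/*
 * Illustrative sketch only (not built): how an early boot-time consumer
 * could use the two helpers above to peek at a table header before the
 * early mappings go away. example_peek_table() and its 'phys' argument
 * are hypothetical, not part of this file.
 */
#if 0
static void __init example_peek_table(unsigned long phys)
{
	struct acpi_table_header *hdr;

	hdr = (struct acpi_table_header *)__acpi_map_table(phys, sizeof(*hdr));
	if (!hdr)
		return;

	pr_info("table %.4s, revision %u, length %u\n",
		hdr->signature, hdr->revision, hdr->length);

	__acpi_unmap_table((void __iomem *)hdr, sizeof(*hdr));
}
#endif
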
bool __init acpi_psci_present(void)
{
	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
}

/* Whether HVC must be used instead of SMC as the PSCI conduit */
bool acpi_psci_use_hvc(void)
{
	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}

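/*
 * Illustrative sketch only (not built): how a PSCI-style consumer might
 * combine the two FADT queries above to pick its conduit. The helper name
 * example_pick_psci_conduit() is hypothetical.
 */
#if 0
static void __init example_pick_psci_conduit(void)
{
	if (!acpi_psci_present())
		return;		/* FADT says the platform is not PSCI compliant */

	if (acpi_psci_use_hvc())
		pr_info("PSCI: using HVC conduit\n");
	else
		pr_info("PSCI: using SMC conduit\n");
}
#endif
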
/*
 * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
 *			      checks on it
 *
 * Return 0 on success, <0 on failure
 */
static int __init acpi_fadt_sanity_check(void)
{
	struct acpi_table_header *table;
	struct acpi_table_fadt *fadt;
	acpi_status status;
	int ret = 0;

	/*
	 * FADT is required on arm64; retrieve it to check its presence
	 * and carry out revision and ACPI HW reduced compliance checks
	 */
	status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
	if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err("Failed to get FADT table, %s\n", msg);
		return -ENODEV;
	}

	fadt = (struct acpi_table_fadt *)table;

	/*
	 * The revision in the table header is the FADT's major revision;
	 * ACPI 5.1 introduced a separate FADT minor revision field. Only
	 * ACPI 5.1 or newer revisions carry the GIC and SMP boot protocol
	 * configuration data we need.
	 */
	if (table->revision < 5 ||
	   (table->revision == 5 && fadt->minor_revision < 1)) {
		pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
		       table->revision, fadt->minor_revision);

		if (!fadt->arm_boot_flags) {
			ret = -EINVAL;
			goto out;
		}
		pr_err("FADT has ARM boot flags set, assuming 5.1\n");
	}

	if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
		pr_err("FADT not ACPI hardware reduced compliant\n");
		ret = -EINVAL;
	}

out:
	/*
	 * acpi_get_table() creates a FADT table mapping that
	 * should be released after parsing and before resuming boot
	 */
	acpi_put_table(table);
	return ret;
}

/*
 * acpi_boot_table_init() is called from setup_arch(), always.
 *	1. find the RSDP and get its address, and then find the XSDT
 *	2. extract all tables and checksum them all
 *	3. check the ACPI FADT revision
 *	4. check the ACPI FADT HW reduced flag
 *
 * We can parse ACPI boot-time tables such as the MADT after
 * this function is called.
 *
 * On return ACPI is enabled if either:
 *
 * - ACPI tables are initialized and sanity checks passed
 * - acpi=force was passed on the command line and ACPI was not disabled
 *   explicitly through the acpi=off command line parameter
 *
 * Otherwise ACPI is disabled when this function returns.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * Enable ACPI instead of device tree unless
	 * - ACPI has been disabled explicitly (acpi=off), or
	 * - the device tree is not empty (it has more than just a /chosen node,
	 *   and a /hypervisor node when running on Xen)
	 *   and ACPI has not been [force] enabled (acpi=on|force)
	 */
	if (param_acpi_off ||
	    (!param_acpi_on && !param_acpi_force && !dt_is_stub()))
		goto done;

	/*
	 * ACPI is disabled at this point. Enable it in order to parse
	 * the ACPI tables and carry out sanity checks
	 */
	enable_acpi();

	/*
	 * If ACPI tables are initialized and FADT sanity checks passed,
	 * leave ACPI enabled and carry on booting; otherwise disable ACPI
	 * on initialization error.
	 * If acpi=force was passed on the command line it forces ACPI
	 * to be enabled even if its initialization failed.
	 */
	if (acpi_table_init() || acpi_fadt_sanity_check()) {
		pr_err("Failed to init ACPI tables\n");
		if (!param_acpi_force)
			disable_acpi();
	}

done:
	if (acpi_disabled) {
		if (earlycon_acpi_spcr_enable)
			early_init_dt_scan_chosen_stdout();
	} else {
		acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
		if (IS_ENABLED(CONFIG_ACPI_BGRT))
			acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
	}
}

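/*
 * Illustrative sketch only (not built): the table-init decision above,
 * expressed as a single predicate. ACPI is attempted when it was not
 * turned off explicitly and either it was requested (acpi=on|force) or
 * the device tree is just a stub. example_should_try_acpi() is
 * hypothetical.
 */
#if 0
static bool __init example_should_try_acpi(void)
{
	return !param_acpi_off &&
	       (param_acpi_on || param_acpi_force || dt_is_stub());
}
#endif
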
static pgprot_t __acpi_get_writethrough_mem_attribute(void)
{
	/*
	 * Although UEFI specifies the use of Normal Write-through for
	 * EFI_MEMORY_WT, it is seldom used in practice and not implemented
	 * by most (all?) CPUs. Rather than allocate a MAIR just for this
	 * purpose, emit a warning and use Normal Non-cacheable instead.
	 */
	pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
	return __pgprot(PROT_NORMAL_NC);
}

pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
	/*
	 * According to "Table 8 Map: EFI memory types to AArch64 memory
	 * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
	 * mapped to a corresponding MAIR attribute encoding.
	 * The EFI memory attribute advises all possible capabilities
	 * of a memory region.
	 */

	u64 attr;

	attr = efi_mem_attributes(addr);
	if (attr & EFI_MEMORY_WB)
		return PAGE_KERNEL;
	if (attr & EFI_MEMORY_WC)
		return __pgprot(PROT_NORMAL_NC);
	if (attr & EFI_MEMORY_WT)
		return __acpi_get_writethrough_mem_attribute();
	return __pgprot(PROT_DEVICE_nGnRnE);
}

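/*
 * Illustrative sketch only (not built): mapping a firmware-described
 * physical range with the EFI-derived attributes returned above, much as
 * acpi_os_ioremap() below ultimately does. The helper name
 * example_map_with_efi_attrs() is hypothetical.
 */
#if 0
static void __iomem *example_map_with_efi_attrs(phys_addr_t phys, size_t size)
{
	pgprot_t prot = __acpi_get_mem_attribute(phys);

	return ioremap_prot(phys, size, pgprot_val(prot));
}
#endif
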
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	efi_memory_desc_t *md, *region = NULL;
	pgprot_t prot;

	if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
		return NULL;

	for_each_efi_memory_desc(md) {
		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (phys < md->phys_addr || phys >= end)
			continue;

		if (phys + size > end) {
			pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
			return NULL;
		}
		region = md;
		break;
	}

	/*
	 * It is fine for AML to remap regions that are not represented in the
	 * EFI memory map at all, as it only describes normal memory, and MMIO
	 * regions that require a virtual mapping to make them accessible to
	 * the EFI runtime services.
	 */
	prot = __pgprot(PROT_DEVICE_nGnRnE);
	if (region) {
		switch (region->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
		case EFI_PERSISTENT_MEMORY:
			if (memblock_is_map_memory(phys) ||
			    !memblock_is_region_memory(phys, size)) {
				pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
				return NULL;
			}
			/*
			 * Mapping kernel memory is permitted if the region in
			 * question is covered by a single memblock with the
			 * NOMAP attribute set: this enables the use of ACPI
			 * table overrides passed via initramfs, which are
			 * reserved in memory using arch_reserve_mem_area()
			 * below. As this particular use case only requires
			 * read access, fall through to the R/O mapping case.
			 */
			fallthrough;

		case EFI_RUNTIME_SERVICES_CODE:
			/*
			 * This would be unusual, but not problematic per se,
			 * as long as we take care not to create a writable
			 * mapping for executable code.
			 */
			prot = PAGE_KERNEL_RO;
			break;

		case EFI_ACPI_RECLAIM_MEMORY:
			/*
			 * ACPI reclaim memory is used to pass firmware tables
			 * and other data that is intended for consumption by
			 * the OS only, which may decide it wants to reclaim
			 * that memory and use it for something else. We never
			 * do that, but we usually add it to the linear map
			 * anyway, in which case we should use the existing
			 * mapping.
			 */
			if (memblock_is_map_memory(phys))
				return (void __iomem *)__phys_to_virt(phys);
			fallthrough;

		default:
			if (region->attribute & EFI_MEMORY_WB)
				prot = PAGE_KERNEL;
			else if (region->attribute & EFI_MEMORY_WC)
				prot = __pgprot(PROT_NORMAL_NC);
			else if (region->attribute & EFI_MEMORY_WT)
				prot = __acpi_get_writethrough_mem_attribute();
		}
	}
	return ioremap_prot(phys, size, pgprot_val(prot));
}

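/*
 * Illustrative sketch only (not built): a one-off read of a firmware
 * register described by AML, letting the policy above choose the memory
 * attributes. example_read_fw_reg() and its address are hypothetical.
 */
#if 0
static u32 example_read_fw_reg(acpi_physical_address phys)
{
	void __iomem *va = acpi_os_ioremap(phys, sizeof(u32));
	u32 val;

	if (!va)
		return 0;

	val = readl(va);
	iounmap(va);
	return val;
}
#endif
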
/*
 * Claim Synchronous External Aborts as a firmware first notification.
 *
 * Used by KVM and the arch do_sea handler.
 * @regs may be NULL when called from process context.
 */
int apei_claim_sea(struct pt_regs *regs)
{
	int err = -ENOENT;
	bool return_to_irqs_enabled;
	unsigned long current_flags;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return err;

	current_flags = local_daif_save_flags();

	/* current_flags isn't useful here as daif doesn't tell us about pNMI */
	return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());

	if (regs)
		return_to_irqs_enabled = interrupts_enabled(regs);

	/*
	 * SEA can interrupt SError, mask it and describe this as an NMI so
	 * that APEI defers the handling.
	 */
	local_daif_restore(DAIF_ERRCTX);
	nmi_enter();
	err = ghes_notify_sea();
	nmi_exit();

	/*
	 * APEI NMI-like notifications are deferred to irq_work. Unless
	 * we interrupted irqs-masked code, we can do that now.
	 */
	if (!err) {
		if (return_to_irqs_enabled) {
			local_daif_restore(DAIF_PROCCTX_NOIRQ);
			__irq_enter();
			irq_work_run();
			__irq_exit();
		} else {
			pr_warn_ratelimited("APEI work queued but not completed");
			err = -EINPROGRESS;
		}
	}

	local_daif_restore(current_flags);

	return err;
}

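/*
 * Illustrative sketch only (not built): how a synchronous external abort
 * handler might try the firmware-first path above before falling back to
 * its own handling. example_try_firmware_first() is hypothetical.
 */
#if 0
static bool example_try_firmware_first(struct pt_regs *regs)
{
	/* 0 means APEI/GHES claimed and handled the abort */
	return apei_claim_sea(regs) == 0;
}
#endif
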
void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_mark_nomap(addr, size);
}

#ifdef CONFIG_ACPI_FFH
/*
 * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as
 * specified in https://developer.arm.com/docs/den0048/latest
 */
struct acpi_ffh_data {
	struct acpi_ffh_info info;
	void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1,
			      unsigned long a2, unsigned long a3,
			      unsigned long a4, unsigned long a5,
			      unsigned long a6, unsigned long a7,
			      struct arm_smccc_res *args,
			      struct arm_smccc_quirk *res);
	void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args,
				struct arm_smccc_1_2_regs *res);
};

int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt)
{
	enum arm_smccc_conduit conduit;
	struct acpi_ffh_data *ffh_ctxt;

	if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
		return -EOPNOTSUPP;

	conduit = arm_smccc_1_1_get_conduit();
	if (conduit == SMCCC_CONDUIT_NONE) {
		pr_err("%s: invalid SMCCC conduit\n", __func__);
		return -EOPNOTSUPP;
	}

	ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL);
	if (!ffh_ctxt)
		return -ENOMEM;

	if (conduit == SMCCC_CONDUIT_SMC) {
		ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc;
		ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc;
	} else {
		ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc;
		ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc;
	}

	memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info));

	*region_ctxt = ffh_ctxt;
	return AE_OK;
}

static bool acpi_ffh_smccc_owner_allowed(u32 fid)
{
	int owner = ARM_SMCCC_OWNER_NUM(fid);

	if (owner == ARM_SMCCC_OWNER_STANDARD ||
	    owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM)
		return true;

	return false;
}

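/*
 * Illustrative sketch only (not built): the owner filter above admits
 * Standard, SiP and OEM service calls only; an Arm Architecture call such
 * as SMCCC_VERSION (owner 0) is rejected. The function id 0x20 below is an
 * arbitrary placeholder and example_owner_filter() is hypothetical.
 */
#if 0
static void example_owner_filter(void)
{
	u32 sip_fid = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
					 ARM_SMCCC_SMC_32,
					 ARM_SMCCC_OWNER_SIP, 0x20);

	WARN_ON(!acpi_ffh_smccc_owner_allowed(sip_fid));
	WARN_ON(acpi_ffh_smccc_owner_allowed(ARM_SMCCC_VERSION_FUNC_ID));
}
#endif
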
int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context)
{
	int ret = 0;
	struct acpi_ffh_data *ffh_ctxt = region_context;

	if (ffh_ctxt->info.offset == 0) {
		/* SMC/HVC 32bit call */
		struct arm_smccc_res res;
		u32 a[8] = { 0 }, *ptr = (u32 *)value;

		if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) ||
		    !acpi_ffh_smccc_owner_allowed(*ptr) ||
		    ffh_ctxt->info.length > 32) {
			ret = AE_ERROR;
		} else {
			int idx, len = ffh_ctxt->info.length >> 2;

			for (idx = 0; idx < len; idx++)
				a[idx] = *(ptr + idx);

			ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4],
						a[5], a[6], a[7], &res, NULL);
			memcpy(value, &res, sizeof(res));
		}

	} else if (ffh_ctxt->info.offset == 1) {
		/* SMC/HVC 64bit call */
		struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value;

		if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) ||
		    !acpi_ffh_smccc_owner_allowed(r->a0) ||
		    ffh_ctxt->info.length > sizeof(*r)) {
			ret = AE_ERROR;
		} else {
			ffh_ctxt->invoke_ffh64_fn(r, r);
			memcpy(value, r, ffh_ctxt->info.length);
		}
	} else {
		ret = AE_ERROR;
	}

	return ret;
}
#endif /* CONFIG_ACPI_FFH */