// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013, 2014 Linaro Ltd;  <roy.franz@linaro.org>
 *
 * This file implements the EFI boot stub for the arm64 kernel.
 * Adapted from ARM version by Mark Salter <msalter@redhat.com>
 */

#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/memory.h>
#include <asm/sysreg.h>

#include "efistub.h"

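/*
 * Check whether this machine's firmware is known to misbehave unless
 * SetVirtualAddressMap() is called, based on the SMBIOS type 1 (System
 * Information) "family" string.
 */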
static bool system_needs_vamap(void)
{
	const u8 *type1_family = efi_get_smbios_string(1, family);

	/*
	 * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
	 * SetVirtualAddressMap() has not been called prior.
	 */
	if (!type1_family || (
	    strcmp(type1_family, "eMAG") &&
	    strcmp(type1_family, "Altra") &&
	    strcmp(type1_family, "Altra Max")))
		return false;

	efi_warn("Working around broken SetVirtualAddressMap()\n");
	return true;
}

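/*
 * Check that the CPU supports the configured translation granule, and decide
 * whether the UEFI runtime regions can stay mapped 1:1 so that calling
 * SetVirtualAddressMap() can be skipped.
 */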
efi_status_t check_platform_features(void)
{
	u64 tg;

	/*
	 * If we have 48 bits of VA space for TTBR0 mappings, we can map the
	 * UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is
	 * unnecessary.
	 */
	if (VA_BITS_MIN >= 48 && !system_needs_vamap())
		efi_novamap = true;

	/* UEFI mandates support for 4 KB granularity, no need to check */
	if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
		return EFI_SUCCESS;

	/*
	 * ID_AA64MMFR0_EL1_TGRAN_SHIFT resolves to the TGRAN16 or TGRAN64
	 * field, matching the configured page size.
	 */
	tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
	if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
		if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
			efi_err("This 64 KB granular kernel is not supported by your CPU\n");
		else
			efi_err("This 16 KB granular kernel is not supported by your CPU\n");
		return EFI_UNSUPPORTED;
	}
	return EFI_SUCCESS;
}

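/*
 * "dc cvau" (clean to the Point of Unification) is normally sufficient to
 * make newly written code visible to instruction fetch. Kernels built with
 * CONFIG_ARM64_WORKAROUND_CLEAN_CACHE use "dc civac" (clean and invalidate
 * to the Point of Coherency) instead, to accommodate cores whose cvau
 * operation is affected by errata.
 */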
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
#define DCTYPE	"civac"
#else
#define DCTYPE	"cvau"
#endif

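/*
 * Make the loaded image's code region coherent between the D-side and the
 * I-side of the CPU before it is executed. Note that only code_size bytes
 * starting at image_base receive D-cache maintenance; alloc_size is unused
 * here.
 */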
void efi_cache_sync_image(unsigned long image_base,
			  unsigned long alloc_size,
			  unsigned long code_size)
{
	u32 ctr = read_cpuid_effective_cachetype();
	/* CTR_EL0.DminLine is log2 of the line size in 4-byte words */
	u64 lsize = 4 << cpuid_feature_extract_unsigned_field(ctr,
						CTR_EL0_DminLine_SHIFT);

	/* only perform the cache maintenance if needed for I/D coherency */
	if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
		/* walk the code region one minimal D-cache line at a time */
		do {
			asm("dc " DCTYPE ", %0" :: "r"(image_base));
			image_base += lsize;
			code_size -= lsize;
		} while (code_size >= lsize);
	}

	/* invalidate all I-caches in the Inner Shareable domain */
	asm("ic ialluis");
	dsb(ish);
	isb();
}

unsigned long __weak primary_entry_offset(void)
{
	/*
	 * By default, we can invoke the kernel via the branch instruction in
	 * the image header, so offset #0. This will be overridden by the EFI
	 * stub build that is linked into the core kernel, as in that case, the
	 * image header may not have been loaded into memory, or may be mapped
	 * with non-executable permissions.
	 */
	return 0;
}

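/*
 * Jump to the kernel proper. Per the arm64 boot protocol, x0 carries the
 * physical address of the device tree blob and x1..x3 must be zero; the
 * blob describes its own size, so fdt_size is not passed on.
 */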
void __noreturn efi_enter_kernel(unsigned long entrypoint,
				 unsigned long fdt_addr,
				 unsigned long fdt_size)
{
	void (* __noreturn enter_kernel)(u64, u64, u64, u64);

	enter_kernel = (void *)entrypoint + primary_entry_offset();
	enter_kernel(fdt_addr, 0, 0, 0);
}