// SPDX-License-Identifier: GPL-2.0-only
/*
 * EFI stub implementation that is shared by arm and arm64 architectures.
 * This should be #included by the EFI stub implementation files.
 *
 * Copyright (C) 2013,2014 Linaro Limited
 *     Roy Franz <roy.franz@linaro.org>
 * Copyright (C) 2013 Red Hat, Inc.
 *     Mark Salter <msalter@redhat.com>
 */

#include <linux/efi.h>
#include <asm/efi.h>

#include "efistub.h"

/*
 * This is the base address at which to start allocating virtual memory ranges
 * for UEFI Runtime Services.
 *
 * For ARM/ARM64:
 * This is in the low TTBR0 range so that we can use
 * any allocation we choose, and eliminate the risk of a conflict after kexec.
 * The value chosen is the largest non-zero power of 2 suitable for this purpose
 * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
 * be mapped efficiently.
 * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
 * map everything below 1 GB. (512 MB is a reasonable upper bound for the
 * entire footprint of the UEFI runtime services memory regions)
 *
 * For RISC-V:
 * There is no specific reason why this address (512 MB) can't be used as the
 * EFI runtime virtual address base for RISC-V, and it allows EFI runtime
 * services to be used on both RV32 and RV64. Keep the same runtime virtual
 * address for RISC-V as well to minimize code churn.
 */
#define EFI_RT_VIRTUAL_BASE	SZ_512M

/*
 * Some architectures map the EFI regions into the kernel's linear map using a
 * fixed offset.
 */
#ifndef EFI_RT_VIRTUAL_OFFSET
#define EFI_RT_VIRTUAL_OFFSET	0
#endif

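/*
 * virtmap_base tracks the next virtual address to hand out when assigning
 * virtual addresses to EFI_MEMORY_RUNTIME regions in efi_get_virtmap().
 * When the architecture provides a non-zero EFI_RT_VIRTUAL_OFFSET, runtime
 * regions are instead mapped "flat" at phys_addr + EFI_RT_VIRTUAL_OFFSET,
 * and no per-region virtual addresses need to be assigned below.
 */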
static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);

void __weak free_screen_info(struct screen_info *si)
{
}

static struct screen_info *setup_graphics(void)
{
	efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
	efi_status_t status;
	unsigned long size;
	void **gop_handle = NULL;
	struct screen_info *si = NULL;

	size = 0;
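	/*
	 * Probe with a zero-sized buffer first: if any Graphics Output
	 * Protocol handles exist, LocateHandle() returns EFI_BUFFER_TOO_SMALL
	 * together with the buffer size needed to enumerate them, which is
	 * then passed on to efi_setup_gop().
	 */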
	status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
			     &gop_proto, NULL, &size, gop_handle);
	if (status == EFI_BUFFER_TOO_SMALL) {
		si = alloc_screen_info();
		if (!si)
			return NULL;
		status = efi_setup_gop(si, &gop_proto, size);
		if (status != EFI_SUCCESS) {
			free_screen_info(si);
			return NULL;
		}
	}
	return si;
}

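/*
 * Allocate an empty LINUX_EFI_MEMRESERVE_TABLE list head and publish it as an
 * EFI configuration table, so that the kernel (and any kernel subsequently
 * booted via kexec) can record memory reservations that must persist across
 * kexec.
 */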
static void install_memreserve_table(void)
{
	struct linux_efi_memreserve *rsv;
	efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
	efi_status_t status;

	status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
			     (void **)&rsv);
	if (status != EFI_SUCCESS) {
		efi_err("Failed to allocate memreserve entry!\n");
		return;
	}

	rsv->next = 0;
	rsv->size = 0;
	atomic_set(&rsv->count, 0);

	status = efi_bs_call(install_configuration_table,
			     &memreserve_table_guid, rsv);
	if (status != EFI_SUCCESS)
		efi_err("Failed to install memreserve config table!\n");
}

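/*
 * The optional EFI_RT_PROPERTIES_TABLE advertises which runtime services the
 * firmware will still support after ExitBootServices(). If the table is
 * absent, assume that all of them remain available.
 */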
static u32 get_supported_rt_services(void)
{
	const efi_rt_properties_table_t *rt_prop_table;
	u32 supported = EFI_RT_SUPPORTED_ALL;

	rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
	if (rt_prop_table)
		supported &= rt_prop_table->runtime_services_supported;

	return supported;
}

efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
{
	int cmdline_size = 0;
	efi_status_t status;
	char *cmdline;

	/*
	 * Get the command line from EFI, using the LOADED_IMAGE
	 * protocol. We are going to copy the command line into the
	 * device tree, so this can be allocated anywhere.
	 */
	cmdline = efi_convert_cmdline(image, &cmdline_size);
	if (!cmdline) {
		efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
		return EFI_OUT_OF_RESOURCES;
	}

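	/*
	 * Parse the built-in CONFIG_CMDLINE if it is configured to extend or
	 * replace the firmware-provided command line, or if the firmware
	 * provided no command line at all. The firmware command line is
	 * parsed as well, unless CONFIG_CMDLINE_FORCE discards it.
	 */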
	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
	    IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
	    cmdline[0] == 0) {
		status = efi_parse_options(CONFIG_CMDLINE);
		if (status != EFI_SUCCESS) {
			efi_err("Failed to parse options\n");
			goto fail_free_cmdline;
		}
	}

	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
		status = efi_parse_options(cmdline);
		if (status != EFI_SUCCESS) {
			efi_err("Failed to parse options\n");
			goto fail_free_cmdline;
		}
	}

	*cmdline_ptr = cmdline;
	return EFI_SUCCESS;

fail_free_cmdline:
	efi_bs_call(free_pool, cmdline);
	return status;
}

efi_status_t efi_stub_common(efi_handle_t handle,
			     efi_loaded_image_t *image,
			     unsigned long image_addr,
			     char *cmdline_ptr)
{
	struct screen_info *si;
	efi_status_t status;

	status = check_platform_features();
	if (status != EFI_SUCCESS)
		return status;

	si = setup_graphics();

	efi_retrieve_tpm2_eventlog();

	/* Ask the firmware to clear memory on unclean shutdown */
	efi_enable_reset_attack_mitigation();

	efi_load_initrd(image, ULONG_MAX, efi_get_max_initrd_addr(image_addr),
			NULL);

	efi_random_get_seed();

	/* force efi_novamap if SetVirtualAddressMap() is unsupported */
	efi_novamap |= !(get_supported_rt_services() &
			 EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);

	install_memreserve_table();

	status = efi_boot_kernel(handle, image, image_addr, cmdline_ptr);

	free_screen_info(si);
	return status;
}

/*
 * efi_alloc_virtmap() - create a pool allocation for the virtmap
 *
 * Create an allocation that is of sufficient size to hold all the memory
 * descriptors that will be passed to SetVirtualAddressMap() to inform the
 * firmware about the virtual mapping that will be used under the OS to call
 * into the firmware.
 */
efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
			       unsigned long *desc_size, u32 *desc_ver)
{
	unsigned long size, mmap_key;
	efi_status_t status;

	/*
	 * Use the size of the current memory map as an upper bound for the
	 * size of the buffer we need to pass to SetVirtualAddressMap() to
	 * cover all EFI_MEMORY_RUNTIME regions.
	 */
	size = 0;
	status = efi_bs_call(get_memory_map, &size, NULL, &mmap_key, desc_size,
			     desc_ver);
	if (status != EFI_BUFFER_TOO_SMALL)
		return EFI_LOAD_ERROR;

	return efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
			   (void **)virtmap);
}

/*
 * efi_get_virtmap() - create a virtual mapping for the EFI memory map
 *
 * This function populates the virt_addr fields of all memory region descriptors
 * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
 * are also copied to @runtime_map, and their total count is returned in @count.
 */
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
		     unsigned long desc_size, efi_memory_desc_t *runtime_map,
		     int *count)
{
	u64 efi_virt_base = virtmap_base;
	efi_memory_desc_t *in, *out = runtime_map;
	int l;

	*count = 0;

	for (l = 0; l < map_size; l += desc_size) {
		u64 paddr, size;

		in = (void *)memory_map + l;
		if (!(in->attribute & EFI_MEMORY_RUNTIME))
			continue;

		paddr = in->phys_addr;
		size = in->num_pages * EFI_PAGE_SIZE;

		in->virt_addr = in->phys_addr + EFI_RT_VIRTUAL_OFFSET;
		if (efi_novamap)
			continue;

		/*
		 * Make the mapping compatible with 64k pages: this allows
		 * a 4k page size kernel to kexec a 64k page size kernel and
		 * vice versa.
		 */
		if (!flat_va_mapping) {

			paddr = round_down(in->phys_addr, SZ_64K);
			size += in->phys_addr - paddr;
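			/*
			 * Example with hypothetical numbers: a region whose
			 * phys_addr is 0x87654000 is rounded down to a paddr
			 * of 0x87650000, and size grows by the 0x4000 bytes
			 * that were trimmed off, so the mapping starts on a
			 * 64k boundary while still covering the whole region.
			 */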

			/*
			 * Avoid wasting memory on PTEs by choosing a virtual
			 * base that is compatible with section mappings if this
			 * region has the appropriate size and physical
			 * alignment. (Sections are 2 MB on 4k granule kernels)
			 */
			if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
				efi_virt_base = round_up(efi_virt_base, SZ_2M);
			else
				efi_virt_base = round_up(efi_virt_base, SZ_64K);

			in->virt_addr += efi_virt_base - paddr;
			efi_virt_base += size;
		}

		memcpy(out, in, desc_size);
		out = (void *)out + desc_size;
		++*count;
	}
}