1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * EFI stub implementation that is shared by arm and arm64 architectures. 4 * This should be #included by the EFI stub implementation files. 5 * 6 * Copyright (C) 2013,2014 Linaro Limited 7 * Roy Franz <roy.franz@linaro.org 8 * Copyright (C) 2013 Red Hat, Inc. 9 * Mark Salter <msalter@redhat.com> 10 */ 11 12 #include <linux/efi.h> 13 #include <asm/efi.h> 14 15 #include "efistub.h" 16 17 /* 18 * This is the base address at which to start allocating virtual memory ranges 19 * for UEFI Runtime Services. 20 * 21 * For ARM/ARM64: 22 * This is in the low TTBR0 range so that we can use 23 * any allocation we choose, and eliminate the risk of a conflict after kexec. 24 * The value chosen is the largest non-zero power of 2 suitable for this purpose 25 * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can 26 * be mapped efficiently. 27 * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split, 28 * map everything below 1 GB. (512 MB is a reasonable upper bound for the 29 * entire footprint of the UEFI runtime services memory regions) 30 * 31 * For RISC-V: 32 * There is no specific reason for which, this address (512MB) can't be used 33 * EFI runtime virtual address for RISC-V. It also helps to use EFI runtime 34 * services on both RV32/RV64. Keep the same runtime virtual address for RISC-V 35 * as well to minimize the code churn. 36 */ 37 #define EFI_RT_VIRTUAL_BASE SZ_512M 38 #define EFI_RT_VIRTUAL_SIZE SZ_512M 39 40 #ifdef CONFIG_ARM64 41 # define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64 42 #elif defined(CONFIG_RISCV) || defined(CONFIG_LOONGARCH) 43 # define EFI_RT_VIRTUAL_LIMIT TASK_SIZE_MIN 44 #else /* Only if TASK_SIZE is a constant */ 45 # define EFI_RT_VIRTUAL_LIMIT TASK_SIZE 46 #endif 47 48 /* 49 * Some architectures map the EFI regions into the kernel's linear map using a 50 * fixed offset. 
51 */ 52 #ifndef EFI_RT_VIRTUAL_OFFSET 53 #define EFI_RT_VIRTUAL_OFFSET 0 54 #endif 55 56 static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; 57 static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0); 58 59 const efi_system_table_t *efi_system_table; 60 61 static struct screen_info *setup_graphics(void) 62 { 63 efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; 64 efi_status_t status; 65 unsigned long size; 66 void **gop_handle = NULL; 67 struct screen_info *si = NULL; 68 69 size = 0; 70 status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, 71 &gop_proto, NULL, &size, gop_handle); 72 if (status == EFI_BUFFER_TOO_SMALL) { 73 si = alloc_screen_info(); 74 if (!si) 75 return NULL; 76 status = efi_setup_gop(si, &gop_proto, size); 77 if (status != EFI_SUCCESS) { 78 free_screen_info(si); 79 return NULL; 80 } 81 } 82 return si; 83 } 84 85 static void install_memreserve_table(void) 86 { 87 struct linux_efi_memreserve *rsv; 88 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; 89 efi_status_t status; 90 91 status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), 92 (void **)&rsv); 93 if (status != EFI_SUCCESS) { 94 efi_err("Failed to allocate memreserve entry!\n"); 95 return; 96 } 97 98 rsv->next = 0; 99 rsv->size = 0; 100 atomic_set(&rsv->count, 0); 101 102 status = efi_bs_call(install_configuration_table, 103 &memreserve_table_guid, rsv); 104 if (status != EFI_SUCCESS) 105 efi_err("Failed to install memreserve config table!\n"); 106 } 107 108 static u32 get_supported_rt_services(void) 109 { 110 const efi_rt_properties_table_t *rt_prop_table; 111 u32 supported = EFI_RT_SUPPORTED_ALL; 112 113 rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID); 114 if (rt_prop_table) 115 supported &= rt_prop_table->runtime_services_supported; 116 117 return supported; 118 } 119 120 /* 121 * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint 122 * that is described in the PE/COFF header. 
 * Most of the code is the same
 * for both architectures, with the arch-specific code provided in the
 * handle_kernel_image() function.
 */
efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
				   efi_system_table_t *sys_table_arg)
{
	efi_loaded_image_t *image;
	efi_status_t status;
	unsigned long image_addr;
	unsigned long image_size = 0;
	/* addr and size pairs used for memory management below */
	char *cmdline_ptr = NULL;
	int cmdline_size = 0;
	efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
	unsigned long reserve_addr = 0;
	unsigned long reserve_size = 0;
	struct screen_info *si;
	efi_properties_table_t *prop_tbl;

	efi_system_table = sys_table_arg;

	/* Check if we were booted by the EFI firmware */
	if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		status = EFI_INVALID_PARAMETER;
		goto fail;
	}

	/* Arch hook: bail out early if the CPU lacks required features. */
	status = check_platform_features();
	if (status != EFI_SUCCESS)
		goto fail;

	/*
	 * Get a handle to the loaded image protocol. This is used to get
	 * information about the running image, such as size and the command
	 * line.
	 */
	status = efi_system_table->boottime->handle_protocol(handle,
					&loaded_image_proto, (void *)&image);
	if (status != EFI_SUCCESS) {
		efi_err("Failed to get loaded image protocol\n");
		goto fail;
	}

	/*
	 * Get the command line from EFI, using the LOADED_IMAGE
	 * protocol. We are going to copy the command line into the
	 * device tree, so this can be allocated anywhere.
	 */
	cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
	if (!cmdline_ptr) {
		/* Nothing allocated yet, so plain 'fail' (no free_pool). */
		efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
		status = EFI_OUT_OF_RESOURCES;
		goto fail;
	}

	/*
	 * Parse the built-in command line first, so that options passed by
	 * the boot loader (parsed second, below) take precedence unless
	 * CONFIG_CMDLINE_FORCE is set.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
	    IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
	    cmdline_size == 0) {
		status = efi_parse_options(CONFIG_CMDLINE);
		if (status != EFI_SUCCESS) {
			efi_err("Failed to parse options\n");
			goto fail_free_cmdline;
		}
	}

	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
		status = efi_parse_options(cmdline_ptr);
		if (status != EFI_SUCCESS) {
			efi_err("Failed to parse options\n");
			goto fail_free_cmdline;
		}
	}

	efi_info("Booting Linux Kernel...\n");

	/* May legitimately return NULL when no GOP console exists. */
	si = setup_graphics();

	/* Arch-specific: place/relocate the kernel image in memory. */
	status = handle_kernel_image(&image_addr, &image_size,
				     &reserve_addr,
				     &reserve_size,
				     image, handle);
	if (status != EFI_SUCCESS) {
		efi_err("Failed to relocate kernel\n");
		goto fail_free_screeninfo;
	}

	efi_retrieve_tpm2_eventlog();

	/* Ask the firmware to clear memory on unclean shutdown */
	efi_enable_reset_attack_mitigation();

	efi_load_initrd(image, ULONG_MAX, efi_get_max_initrd_addr(image_addr),
			NULL);

	efi_random_get_seed();

	/*
	 * If the NX PE data feature is enabled in the properties table, we
	 * should take care not to create a virtual mapping that changes the
	 * relative placement of runtime services code and data regions, as
	 * they may belong to the same PE/COFF executable image in memory.
	 * The easiest way to achieve that is to simply use a 1:1 mapping.
	 */
	prop_tbl = get_efi_config_table(EFI_PROPERTIES_TABLE_GUID);
	flat_va_mapping |= prop_tbl &&
			   (prop_tbl->memory_protection_attribute &
			    EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);

	/* force efi_novamap if SetVirtualAddressMap() is unsupported */
	efi_novamap |= !(get_supported_rt_services() &
			 EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);

	/* hibernation expects the runtime regions to stay in the same place */
	if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
		/*
		 * Randomize the base of the UEFI runtime services region.
		 * Preserve the 2 MB alignment of the region by taking a
		 * shift of 21 bit positions into account when scaling
		 * the headroom value using a 32-bit random value.
		 */
		static const u64 headroom = EFI_RT_VIRTUAL_LIMIT -
					    EFI_RT_VIRTUAL_BASE -
					    EFI_RT_VIRTUAL_SIZE;
		u32 rnd;

		status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd);
		if (status == EFI_SUCCESS) {
			/* base + (headroom * rnd / 2^32), rounded to 2 MB */
			virtmap_base = EFI_RT_VIRTUAL_BASE +
				       (((headroom >> 21) * rnd) >> (32 - 21));
		}
	}

	install_memreserve_table();

	/*
	 * NOTE(review): efi_boot_kernel() presumably only returns on
	 * failure, after which we fall through to the cleanup ladder —
	 * confirm against its definition.
	 */
	status = efi_boot_kernel(handle, image, image_addr, cmdline_ptr);

	efi_free(image_size, image_addr);
	efi_free(reserve_size, reserve_addr);
fail_free_screeninfo:
	/*
	 * NOTE(review): si may be NULL here (setup_graphics() can fail);
	 * assumes free_screen_info(NULL) is a no-op — confirm.
	 */
	free_screen_info(si);
fail_free_cmdline:
	efi_bs_call(free_pool, cmdline_ptr);
fail:
	return status;
}

/*
 * efi_allocate_virtmap() - create a pool allocation for the virtmap
 *
 * Create an allocation that is of sufficient size to hold all the memory
 * descriptors that will be passed to SetVirtualAddressMap() to inform the
 * firmware about the virtual mapping that will be used under the OS to call
 * into the firmware.
276 */ 277 efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap, 278 unsigned long *desc_size, u32 *desc_ver) 279 { 280 unsigned long size, mmap_key; 281 efi_status_t status; 282 283 /* 284 * Use the size of the current memory map as an upper bound for the 285 * size of the buffer we need to pass to SetVirtualAddressMap() to 286 * cover all EFI_MEMORY_RUNTIME regions. 287 */ 288 size = 0; 289 status = efi_bs_call(get_memory_map, &size, NULL, &mmap_key, desc_size, 290 desc_ver); 291 if (status != EFI_BUFFER_TOO_SMALL) 292 return EFI_LOAD_ERROR; 293 294 return efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, 295 (void **)virtmap); 296 } 297 298 /* 299 * efi_get_virtmap() - create a virtual mapping for the EFI memory map 300 * 301 * This function populates the virt_addr fields of all memory region descriptors 302 * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors 303 * are also copied to @runtime_map, and their total count is returned in @count. 304 */ 305 void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, 306 unsigned long desc_size, efi_memory_desc_t *runtime_map, 307 int *count) 308 { 309 u64 efi_virt_base = virtmap_base; 310 efi_memory_desc_t *in, *out = runtime_map; 311 int l; 312 313 *count = 0; 314 315 for (l = 0; l < map_size; l += desc_size) { 316 u64 paddr, size; 317 318 in = (void *)memory_map + l; 319 if (!(in->attribute & EFI_MEMORY_RUNTIME)) 320 continue; 321 322 paddr = in->phys_addr; 323 size = in->num_pages * EFI_PAGE_SIZE; 324 325 in->virt_addr = in->phys_addr + EFI_RT_VIRTUAL_OFFSET; 326 if (efi_novamap) { 327 continue; 328 } 329 330 /* 331 * Make the mapping compatible with 64k pages: this allows 332 * a 4k page size kernel to kexec a 64k page size kernel and 333 * vice versa. 
334 */ 335 if (!flat_va_mapping) { 336 337 paddr = round_down(in->phys_addr, SZ_64K); 338 size += in->phys_addr - paddr; 339 340 /* 341 * Avoid wasting memory on PTEs by choosing a virtual 342 * base that is compatible with section mappings if this 343 * region has the appropriate size and physical 344 * alignment. (Sections are 2 MB on 4k granule kernels) 345 */ 346 if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) 347 efi_virt_base = round_up(efi_virt_base, SZ_2M); 348 else 349 efi_virt_base = round_up(efi_virt_base, SZ_64K); 350 351 in->virt_addr += efi_virt_base - paddr; 352 efi_virt_base += size; 353 } 354 355 memcpy(out, in, desc_size); 356 out = (void *)out + desc_size; 357 ++*count; 358 } 359 } 360