// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
 *
 * This file implements the EFI boot stub for the arm64 kernel.
 * Adapted from ARM version by Mark Salter <msalter@redhat.com>
 */


#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/sysreg.h>

#include "efistub.h"

static bool system_needs_vamap(void)
{
	const u8 *type1_family = efi_get_smbios_string(1, family);

	/*
	 * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
	 * has not been called prior.
	 */
	if (!type1_family || strcmp(type1_family, "Altra"))
		return false;

	efi_warn("Working around broken SetVirtualAddressMap()\n");
	return true;
}

efi_status_t check_platform_features(void)
{
	u64 tg;

	/*
	 * If we have 48 bits of VA space for TTBR0 mappings, we can map the
	 * UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is
	 * unnecessary.
	 */
	if (VA_BITS_MIN >= 48 && !system_needs_vamap())
		efi_novamap = true;

	/* UEFI mandates support for 4 KB granularity, no need to check */
	if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
		return EFI_SUCCESS;

	tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
	if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
		if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
			efi_err("This 64 KB granular kernel is not supported by your CPU\n");
		else
			efi_err("This 16 KB granular kernel is not supported by your CPU\n");
		return EFI_UNSUPPORTED;
	}
	return EFI_SUCCESS;
}

/*
 * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
 * to provide space, and fail to zero it). Check for this condition by double
 * checking that the first and the last byte of the image are covered by the
 * same EFI memory map entry.
 */
static bool check_image_region(u64 base, u64 size)
{
	struct efi_boot_memmap *map;
	efi_status_t status;
	bool ret = false;
	int map_offset;

	status = efi_get_memory_map(&map, false);
	if (status != EFI_SUCCESS)
		return false;

	for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
		efi_memory_desc_t *md = (void *)map->map + map_offset;
		u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;

		/*
		 * Find the region that covers base, and return whether
		 * it covers base+size bytes.
		 */
		if (base >= md->phys_addr && base < end) {
			ret = (base + size) <= end;
			break;
		}
	}

	efi_bs_call(free_pool, map);

	return ret;
}

efi_status_t handle_kernel_image(unsigned long *image_addr,
				 unsigned long *image_size,
				 unsigned long *reserve_addr,
				 unsigned long *reserve_size,
				 efi_loaded_image_t *image,
				 efi_handle_t image_handle)
{
	efi_status_t status;
	unsigned long kernel_size, kernel_memsize = 0;
	u32 phys_seed = 0;

	/*
	 * Although relocatable kernels can fix up the misalignment with
	 * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
	 * subtly out of sync with those recorded in the vmlinux when kaslr is
	 * disabled but the image required relocation anyway. Therefore retain
	 * 2M alignment if KASLR was explicitly disabled, even if it was not
	 * going to be activated to begin with.
	 */
	u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
		void *p;

		if (efi_nokaslr) {
			efi_info("KASLR disabled on kernel command line\n");
		} else if (efi_bs_call(handle_protocol, image_handle,
				       &li_fixed_proto, &p) == EFI_SUCCESS) {
			efi_info("Image placement fixed by loader\n");
		} else {
			status = efi_get_random_bytes(sizeof(phys_seed),
						      (u8 *)&phys_seed);
			if (status == EFI_NOT_FOUND) {
				efi_info("EFI_RNG_PROTOCOL unavailable\n");
				efi_nokaslr = true;
			} else if (status != EFI_SUCCESS) {
				efi_err("efi_get_random_bytes() failed (0x%lx)\n",
					status);
				efi_nokaslr = true;
			}
		}
	}

	if (image->image_base != _text)
		efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");

	if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
		efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
			SEGMENT_ALIGN >> 10);

	kernel_size = _edata - _text;
	kernel_memsize = kernel_size + (_end - _edata);
	*reserve_size = kernel_memsize;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
		/*
		 * If KASLR is enabled, and we have some randomness available,
		 * locate the kernel at a randomized offset in physical memory.
		 */
		status = efi_random_alloc(*reserve_size, min_kimg_align,
					  reserve_addr, phys_seed);
		if (status != EFI_SUCCESS)
			efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
	} else {
		status = EFI_OUT_OF_RESOURCES;
	}

	if (status != EFI_SUCCESS) {
		if (!check_image_region((u64)_text, kernel_memsize)) {
			efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
		} else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
			/*
			 * Just execute from wherever we were loaded by the
			 * UEFI PE/COFF loader if the alignment is suitable.
			 */
			*image_addr = (u64)_text;
			*reserve_size = 0;
			return EFI_SUCCESS;
		}

		status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
						    ULONG_MAX, min_kimg_align);

		if (status != EFI_SUCCESS) {
			efi_err("Failed to relocate kernel\n");
			*reserve_size = 0;
			return status;
		}
	}

	*image_addr = *reserve_addr;
	memcpy((void *)*image_addr, _text, kernel_size);

	return EFI_SUCCESS;
}