/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <asm/ibt.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

extern unsigned long efi_fw_vendor, efi_config_table;
extern unsigned long efi_mixed_mode_stack_pa;

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 */

#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

/*
 * The EFI services are called through variadic functions in many cases. These
 * functions are implemented in assembler and support only a fixed number of
 * arguments. The macros below allow us to check at build time that we don't
 * try to call them with too many arguments.
 *
 * __efi_nargs() will return the number of arguments if it is 9 or less, and
 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 * impossible to calculate the exact number of arguments beyond some
 * pre-defined limit. The maximum number of arguments currently accepted by
 * any of the thunks is 9 (see efi64_thunk() below), so this is good enough
 * for now and can be extended in the obvious way if we ever need more.
 */

#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,	\
	__efi_arg_sentinel(9), __efi_arg_sentinel(8),		\
	__efi_arg_sentinel(7), __efi_arg_sentinel(6),		\
	__efi_arg_sentinel(5), __efi_arg_sentinel(4),		\
	__efi_arg_sentinel(3), __efi_arg_sentinel(2),		\
	__efi_arg_sentinel(1), __efi_arg_sentinel(0))
#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, n, ...)	\
	__take_second_arg(n,						\
		({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 10; }))
#define __efi_arg_sentinel(n) , n

/*
 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 * represents more than n arguments.
 */

#define __efi_nargs_check(f, n, ...)					\
	__efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
#define __efi_nargs_check__(f, p, n) ({					\
	BUILD_BUG_ON_MSG(						\
		(p) > (n),						\
		#f " called with too many arguments (" #p ">" #n ")");	\
})
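
/*
 * Illustrative expansions (editorial sketch, not referenced by any code in
 * this file): with the definitions above,
 *
 *	__efi_nargs()					evaluates to 0
 *	__efi_nargs(a, b, c)				evaluates to 3
 *	__efi_nargs_check(efi_call, 7,
 *			  a, b, c, d, e, f, g)		compiles quietly
 *	__efi_nargs_check(efi_call, 7,
 *			  a, b, c, d, e, f, g, h)	fails the build: 8 > 7
 *
 * Passing more than nine arguments trips the BUILD_BUG_ON_MSG() inside
 * __efi_nargs__() itself.
 */
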
static inline void efi_fpu_begin(void)
{
	/*
	 * The UEFI calling convention (UEFI spec 2.3.2 and 2.3.4) requires
	 * that FCW and MXCSR (64-bit) be initialized prior to calling UEFI
	 * code. (Oddly, the spec does not require that the FPU stack be
	 * empty.)
	 */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
}

static inline void efi_fpu_end(void)
{
	kernel_fpu_end();
}

#ifdef CONFIG_X86_32
#define arch_efi_call_virt_setup()					\
({									\
	efi_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
})

#define arch_efi_call_virt_teardown()					\
({									\
	firmware_restrict_branch_speculation_end();			\
	efi_fpu_end();							\
})

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern asmlinkage u64 __efi_call(void *fp, ...);

#define efi_call(...) ({						\
	__efi_nargs_check(efi_call, 7, __VA_ARGS__);			\
	__efi_call(__VA_ARGS__);					\
})

#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	efi_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
	efi_enter_mm();							\
})

#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) ({				\
	u64 ret, ibt = ibt_save();					\
	ret = efi_call((void *)p->f, args);				\
	ibt_restore(ibt);						\
	ret;								\
})

#define arch_efi_call_virt_teardown()					\
({									\
	efi_leave_mm();							\
	firmware_restrict_branch_speculation_end();			\
	efi_fpu_end();							\
})
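
/*
 * Editorial sketch (not an interface defined in this header): the generic
 * EFI runtime wrappers are expected to bracket each firmware call with the
 * hooks above, roughly
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(p, get_time, tm, tc);
 *	arch_efi_call_virt_teardown();
 *
 * where 'p' stands for the runtime services table pointer, so that the FPU,
 * speculation and EFI mm state changes stay confined to the duration of the
 * firmware call.
 */
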
#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset. The __memset function is
 * present only in the kernel binary. Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset(). So we should use the standard
 * memset from arch/x86/boot/compressed/string.c. The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);

void efi_enter_mm(void);
void efi_leave_mm(void);

/* kexec external ABI */
struct efi_setup_data {
	u64 fw_vendor;
	u64 __unused;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI
extern efi_status_t __efi64_thunk(u32, ...);

#define efi64_thunk(...) ({						\
	u64 __pad[3]; /* must have space for 3 args on the stack */	\
	__efi_nargs_check(efi64_thunk, 9, __VA_ARGS__);			\
	__efi64_thunk(__VA_ARGS__, __pad);				\
})

static inline bool efi_is_mixed(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return false;
	return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
		return true;

	return IS_ENABLED(CONFIG_EFI_MIXED);
}

extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
					 unsigned long descriptor_size,
					 u32 descriptor_version,
					 efi_memory_desc_t *virtual_map,
					 unsigned long systab_phys);

/* arch specific definitions used by the stub code */

#ifdef CONFIG_EFI_MIXED

#define ARCH_HAS_EFISTUB_WRAPPERS

static inline bool efi_is_64bit(void)
{
	extern const bool efi_is64;

	return efi_is64;
}

static inline bool efi_is_native(void)
{
	return efi_is_64bit();
}

#define efi_mixed_mode_cast(attr)					\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(u32, __typeof__(attr)),	\
			(unsigned long)(attr), (attr))

#define efi_table_attr(inst, attr)					\
	(efi_is_native()						\
		? inst->attr						\
		: (__typeof__(inst->attr))				\
			efi_mixed_mode_cast(inst->mixed_mode.attr))

/*
 * The following macros allow translating arguments if necessary from native to
 * mixed mode. They cover two cases: initializing the upper 32 bits of output
 * parameters, and splitting a 64-bit argument into two 32-bit halves where the
 * 32-bit method requires it, so the call can be thunked properly.
 *
 * As an example of the former, the AllocatePool boot service returns the
 * address of the allocation, but it will not set the high 32 bits of the
 * address. To ensure that the full 64-bit address is initialized, we zero-init
 * the address before calling the thunk.
 *
 * As an example of the latter, the FreePages boot service takes a 64-bit
 * physical address even in 32-bit mode. For the thunk to work correctly, a
 * native 64-bit call of
 *	free_pages(addr, size)
 * must be translated to
 *	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
 * so that the two 32-bit halves of addr get pushed onto the stack separately.
 */
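
/*
 * Editorial illustration of the two cases above (no new mappings are
 * introduced here): in mixed mode, efi_bs_call(allocate_pool, type, size,
 * buffer) ends up as
 *
 *	efi64_thunk(boottime->mixed_mode.allocate_pool,
 *		    type, size, efi64_zero_upper(buffer));
 *
 * while efi_bs_call(set_timer, event, type, time), whose trigger time is a
 * 64-bit quantity, becomes
 *
 *	efi64_thunk(boottime->mixed_mode.set_timer,
 *		    event, type, lower_32_bits(time), upper_32_bits(time));
 *
 * where 'boottime' stands for the boot services table pointer. The mappings
 * applied are __efi64_argmap_allocate_pool() and __efi64_argmap_set_timer()
 * below.
 */
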
static inline void *efi64_zero_upper(void *p)
{
	((u32 *)p)[1] = 0;
	return p;
}

static inline u32 efi64_convert_status(efi_status_t status)
{
	return (u32)(status | (u64)status >> 32);
}

#define __efi64_split(val)	(val) & U32_MAX, (u64)(val) >> 32

#define __efi64_argmap_free_pages(addr, size)				\
	((addr), 0, (size))

#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)	\
	((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))

#define __efi64_argmap_allocate_pool(type, size, buffer)		\
	((type), (size), efi64_zero_upper(buffer))

#define __efi64_argmap_create_event(type, tpl, f, c, event)		\
	((type), (tpl), (f), (c), efi64_zero_upper(event))

#define __efi64_argmap_set_timer(event, type, time)			\
	((event), (type), lower_32_bits(time), upper_32_bits(time))

#define __efi64_argmap_wait_for_event(num, event, index)		\
	((num), (event), efi64_zero_upper(index))

#define __efi64_argmap_handle_protocol(handle, protocol, interface)	\
	((handle), (protocol), efi64_zero_upper(interface))

#define __efi64_argmap_locate_protocol(protocol, reg, interface)	\
	((protocol), (reg), efi64_zero_upper(interface))

#define __efi64_argmap_locate_device_path(protocol, path, handle)	\
	((protocol), (path), efi64_zero_upper(handle))

#define __efi64_argmap_exit(handle, status, size, data)			\
	((handle), efi64_convert_status(status), (size), (data))

/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)	\
	((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),	\
	 efi64_zero_upper(dev), efi64_zero_upper(func))

/* LoadFile */
#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf)	\
	((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))

/* Graphics Output Protocol */
#define __efi64_argmap_query_mode(gop, mode, size, info)		\
	((gop), (mode), efi64_zero_upper(size), efi64_zero_upper(info))

/* TCG2 protocol */
#define __efi64_argmap_hash_log_extend_event(prot, fl, addr, size, ev)	\
	((prot), (fl), 0ULL, (u64)(addr), 0ULL, (u64)(size), 0ULL, ev)

/* DXE services */
#define __efi64_argmap_get_memory_space_descriptor(phys, desc)		\
	(__efi64_split(phys), (desc))

#define __efi64_argmap_set_memory_space_attributes(phys, size, flags)	\
	(__efi64_split(phys), __efi64_split(size), __efi64_split(flags))

/*
 * The macros below handle the plumbing for the argument mapping. To add a
 * mapping for a specific EFI method, simply define a macro
 * __efi64_argmap_<method name>, following the examples above.
 */
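
/*
 * Hypothetical example only (no such EFI method is mapped in this file):
 * a method frob(u64 addr, u32 *out) whose 32-bit variant needs the address
 * split and the output zero-initialized would get
 *
 *	#define __efi64_argmap_frob(addr, out) \
 *		(__efi64_split(addr), efi64_zero_upper(out))
 *
 * Methods whose arguments already thunk cleanly need no mapping macro at
 * all: __efi64_argmap() below then falls back to passing the original
 * argument list through unchanged.
 */
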
#define __efi64_thunk_map(inst, func, ...)				\
	efi64_thunk(inst->mixed_mode.func,				\
		__efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),	\
			       (__VA_ARGS__)))

#define __efi64_argmap(mapped, args)					\
	__PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
#define __efi64_argmap__0(mapped, args) __efi_eval mapped
#define __efi64_argmap__1(mapped, args) __efi_eval args

#define __efi_eat(...)
#define __efi_eval(...) __VA_ARGS__

/* The three macros below handle dispatching via the thunk if needed */

#define efi_call_proto(inst, func, ...)					\
	(efi_is_native()						\
		? inst->func(inst, ##__VA_ARGS__)			\
		: __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))

#define efi_bs_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table->boottime->func(__VA_ARGS__)		\
		: __efi64_thunk_map(efi_table_attr(efi_system_table,	\
						   boottime),		\
				    func, __VA_ARGS__))

#define efi_rt_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table->runtime->func(__VA_ARGS__)		\
		: __efi64_thunk_map(efi_table_attr(efi_system_table,	\
						   runtime),		\
				    func, __VA_ARGS__))

#define efi_dxe_call(func, ...)						\
	(efi_is_native()						\
		? efi_dxe_table->func(__VA_ARGS__)			\
		: __efi64_thunk_map(efi_dxe_table, func, __VA_ARGS__))

#else /* CONFIG_EFI_MIXED */

static inline bool efi_is_64bit(void)
{
	return IS_ENABLED(CONFIG_X86_64);
}

#endif /* CONFIG_EFI_MIXED */

extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);

extern void efi_reserve_boot_services(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
static inline bool efi_is_table_address(unsigned long phys_addr)
{
	return false;
}
static inline void efi_reserve_boot_services(void)
{
}
#endif /* CONFIG_EFI */

#ifdef CONFIG_EFI_FAKE_MEMMAP
extern void __init efi_fake_memmap_early(void);
#else
static inline void efi_fake_memmap_early(void)
{
}
#endif

#define arch_ima_efi_boot_mode \
	({ extern struct boot_params boot_params; boot_params.secure_boot; })

#endif /* _ASM_X86_EFI_H */