/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <asm/ibt.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

extern unsigned long efi_fw_vendor, efi_config_table;
extern unsigned long efi_mixed_mode_stack_pa;

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 */

#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

/*
 * The EFI services are called through variadic functions in many cases. These
 * functions are implemented in assembler and support only a fixed number of
 * arguments. The macros below allow us to check at build time that we don't
 * try to call them with too many arguments.
 *
 * __efi_nargs() will return the number of arguments if it is 9 or less, and
 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 * impossible to calculate the exact number of arguments beyond some
 * pre-defined limit. The maximum number of arguments currently supported by
 * any of the thunks is 9, so this is good enough for now and can be extended
 * in the obvious way if we ever need more.
 */

#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,	\
	__efi_arg_sentinel(9), __efi_arg_sentinel(8),		\
	__efi_arg_sentinel(7), __efi_arg_sentinel(6),		\
	__efi_arg_sentinel(5), __efi_arg_sentinel(4),		\
	__efi_arg_sentinel(3), __efi_arg_sentinel(2),		\
	__efi_arg_sentinel(1), __efi_arg_sentinel(0))
#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, n, ...)	\
	__take_second_arg(n,						\
		({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 10; }))
#define __efi_arg_sentinel(n) , n

/*
 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 * represents more than n arguments.
 */

#define __efi_nargs_check(f, n, ...)					\
	__efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
#define __efi_nargs_check__(f, p, n) ({					\
	BUILD_BUG_ON_MSG(						\
		(p) > (n),						\
		#f " called with too many arguments (" #p ">" #n ")");	\
})
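
/*
 * Illustrative expansions (informative only, not part of the interface):
 *
 *	__efi_nargs()					-> 0
 *	__efi_nargs(a, b, c)				-> 3
 *	__efi_nargs_check(efi_call, 7, a, b, c, d, e, f, g)
 *							-> OK, 7 is not more than 7
 *	__efi_nargs_check(efi_call, 7, a, b, c, d, e, f, g, h)
 *							-> BUILD_BUG, 8 > 7
 *
 * The counting works by appending a descending list of sentinels behind the
 * caller's arguments: every extra argument shifts a smaller sentinel into the
 * n slot of __efi_nargs__(), whose value __take_second_arg() then returns.
 */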

static inline void efi_fpu_begin(void)
{
	/*
	 * The UEFI calling convention (UEFI spec 2.3.2 and 2.3.4) requires
	 * that FCW and MXCSR (64-bit) be initialized prior to calling
	 * UEFI code. (Oddly the spec does not require that the FPU stack
	 * be empty.)
	 */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
}

static inline void efi_fpu_end(void)
{
	kernel_fpu_end();
}

#ifdef CONFIG_X86_32
#define arch_efi_call_virt_setup()					\
({									\
	efi_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
})

#define arch_efi_call_virt_teardown()					\
({									\
	firmware_restrict_branch_speculation_end();			\
	efi_fpu_end();							\
})

#define arch_efi_call_virt(p, f, args...)	p->f(args)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern asmlinkage u64 __efi_call(void *fp, ...);

#define efi_call(...) ({						\
	__efi_nargs_check(efi_call, 7, __VA_ARGS__);			\
	__efi_call(__VA_ARGS__);					\
})

#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	efi_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
	efi_enter_mm();							\
})

#define arch_efi_call_virt(p, f, args...) ({				\
	u64 ret, ibt = ibt_save();					\
	ret = efi_call((void *)p->f, args);				\
	ibt_restore(ibt);						\
	ret;								\
})

#define arch_efi_call_virt_teardown()					\
({									\
	efi_leave_mm();							\
	firmware_restrict_branch_speculation_end();			\
	efi_fpu_end();							\
})
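
/*
 * Informative sketch: the generic EFI runtime wrappers (see
 * efi_call_virt_pointer() in include/linux/efi.h) bracket every runtime
 * service invocation with the hooks above, roughly as
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(rt, get_time, tm, tc);
 *	arch_efi_call_virt_teardown();
 *
 * where rt stands for the runtime services table pointer: setup switches to
 * the EFI mm and prepares FPU/speculation state, the call itself goes through
 * the assembly thunk with IBT state saved and restored, and teardown undoes
 * the setup in reverse order.
 */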

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset. The __memset function is
 * present only in the kernel binary. Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset(), so we must use the standard
 * memset from arch/x86/boot/compressed/string.c. The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);

void efi_enter_mm(void);
void efi_leave_mm(void);

/* kexec external ABI */
struct efi_setup_data {
	u64 fw_vendor;
	u64 __unused;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI
extern efi_status_t __efi64_thunk(u32, ...);

#define efi64_thunk(...) ({						\
	u64 __pad[3]; /* must have space for 3 args on the stack */	\
	__efi_nargs_check(efi64_thunk, 9, __VA_ARGS__);			\
	__efi64_thunk(__VA_ARGS__, __pad);				\
})

static inline bool efi_is_mixed(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return false;
	return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
		return true;

	return IS_ENABLED(CONFIG_EFI_MIXED);
}

extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
					 unsigned long descriptor_size,
					 u32 descriptor_version,
					 efi_memory_desc_t *virtual_map,
					 unsigned long systab_phys);

/* arch specific definitions used by the stub code */

#ifdef CONFIG_EFI_MIXED

#define ARCH_HAS_EFISTUB_WRAPPERS

static inline bool efi_is_64bit(void)
{
	extern const bool efi_is64;

	return efi_is64;
}

static inline bool efi_is_native(void)
{
	return efi_is_64bit();
}

#define efi_mixed_mode_cast(attr)					\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(u32, __typeof__(attr)),	\
			(unsigned long)(attr), (attr))

#define efi_table_attr(inst, attr)					\
	(efi_is_native()						\
		? inst->attr						\
		: (__typeof__(inst->attr))				\
			efi_mixed_mode_cast(inst->mixed_mode.attr))

/*
 * The following macros allow translating arguments if necessary from native to
 * mixed mode. The use case for this is to initialize the upper 32 bits of
 * output parameters, and where the 32-bit method requires a 64-bit argument,
 * which must be split up into two arguments to be thunked properly.
 *
 * As examples, the AllocatePool boot service returns the address of the
 * allocation, but it will not set the high 32 bits of the address. To ensure
 * that the full 64-bit address is initialized, we zero-init the address before
 * calling the thunk.
 *
 * The FreePages boot service takes a 64-bit physical address even in 32-bit
 * mode. For the thunk to work correctly, a native 64-bit call of
 *	free_pages(addr, size)
 * must be translated to
 *	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
 * so that the two 32-bit halves of addr get pushed onto the stack separately.
 * (The __efi64_argmap_free_pages() mapping below simply passes 0 for the
 * upper half, since addresses handed out by 32-bit firmware always fit in
 * 32 bits.)
 */

static inline void *efi64_zero_upper(void *p)
{
	((u32 *)p)[1] = 0;
	return p;
}

static inline u32 efi64_convert_status(efi_status_t status)
{
	return (u32)(status | (u64)status >> 32);
}

#define __efi64_split(val)		(val) & U32_MAX, (u64)(val) >> 32
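
/*
 * Informative example of how the argument maps below are applied: a native
 * call such as
 *
 *	efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, &buf);
 *
 * (see efi_bs_call() further down) is dispatched in mixed mode roughly as
 *
 *	efi64_thunk(<32-bit AllocatePool entry point>,
 *		    EFI_LOADER_DATA, size, efi64_zero_upper(&buf));
 *
 * i.e. the upper half of the output pointer is cleared before the 32-bit
 * firmware fills in only its lower half.
 */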

#define __efi64_argmap_free_pages(addr, size)				\
	((addr), 0, (size))

#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)	\
	((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))

#define __efi64_argmap_allocate_pool(type, size, buffer)		\
	((type), (size), efi64_zero_upper(buffer))

#define __efi64_argmap_create_event(type, tpl, f, c, event)		\
	((type), (tpl), (f), (c), efi64_zero_upper(event))

#define __efi64_argmap_set_timer(event, type, time)			\
	((event), (type), lower_32_bits(time), upper_32_bits(time))

#define __efi64_argmap_wait_for_event(num, event, index)		\
	((num), (event), efi64_zero_upper(index))

#define __efi64_argmap_handle_protocol(handle, protocol, interface)	\
	((handle), (protocol), efi64_zero_upper(interface))

#define __efi64_argmap_locate_protocol(protocol, reg, interface)	\
	((protocol), (reg), efi64_zero_upper(interface))

#define __efi64_argmap_locate_device_path(protocol, path, handle)	\
	((protocol), (path), efi64_zero_upper(handle))

#define __efi64_argmap_exit(handle, status, size, data)			\
	((handle), efi64_convert_status(status), (size), (data))

/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)	\
	((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),	\
	 efi64_zero_upper(dev), efi64_zero_upper(func))

/* LoadFile */
#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf)	\
	((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))

/* Graphics Output Protocol */
#define __efi64_argmap_query_mode(gop, mode, size, info)		\
	((gop), (mode), efi64_zero_upper(size), efi64_zero_upper(info))

/* TCG2 protocol */
#define __efi64_argmap_hash_log_extend_event(prot, fl, addr, size, ev)	\
	((prot), (fl), 0ULL, (u64)(addr), 0ULL, (u64)(size), 0ULL, ev)

/* DXE services */
#define __efi64_argmap_get_memory_space_descriptor(phys, desc)		\
	(__efi64_split(phys), (desc))

#define __efi64_argmap_set_memory_space_descriptor(phys, size, flags)	\
	(__efi64_split(phys), __efi64_split(size), __efi64_split(flags))

/*
 * The macros below handle the plumbing for the argument mapping. To add a
 * mapping for a specific EFI method, simply define a macro
 * __efi64_argmap_<method name>, following the examples above.
 */

#define __efi64_thunk_map(inst, func, ...)				\
	efi64_thunk(inst->mixed_mode.func,				\
		__efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),	\
			       (__VA_ARGS__)))

#define __efi64_argmap(mapped, args)					\
	__PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
#define __efi64_argmap__0(mapped, args) __efi_eval mapped
#define __efi64_argmap__1(mapped, args) __efi_eval args

#define __efi_eat(...)
#define __efi_eval(...) __VA_ARGS__
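
/*
 * How the selection above works (informative only): when an argmap macro is
 * defined for <func>, __efi64_argmap_<func>(...) expands to a parenthesized
 * list, __efi_eat swallows it, __efi_nargs() therefore yields 0, and
 * __efi64_argmap__0 hands the mapped arguments to the thunk. When no argmap
 * macro exists, the unexpanded token sequence survives as a single argument,
 * __efi_nargs() yields 1, and __efi64_argmap__1 passes the caller's original
 * arguments through unchanged.
 */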

/* The macros below handle dispatching via the thunk if needed */

#define efi_call_proto(inst, func, ...)					\
	(efi_is_native()						\
		? inst->func(inst, ##__VA_ARGS__)			\
		: __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))

#define efi_bs_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table->boottime->func(__VA_ARGS__)		\
		: __efi64_thunk_map(efi_table_attr(efi_system_table,	\
						   boottime),		\
				    func, __VA_ARGS__))

#define efi_rt_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table->runtime->func(__VA_ARGS__)		\
		: __efi64_thunk_map(efi_table_attr(efi_system_table,	\
						   runtime),		\
				    func, __VA_ARGS__))

#define efi_dxe_call(func, ...)						\
	(efi_is_native()						\
		? efi_dxe_table->func(__VA_ARGS__)			\
		: __efi64_thunk_map(efi_dxe_table, func, __VA_ARGS__))

#else /* CONFIG_EFI_MIXED */

static inline bool efi_is_64bit(void)
{
	return IS_ENABLED(CONFIG_X86_64);
}

#endif /* CONFIG_EFI_MIXED */

extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);

extern void efi_find_mirror(void);
extern void efi_reserve_boot_services(void);
#else /* !CONFIG_EFI */
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
static inline bool efi_is_table_address(unsigned long phys_addr)
{
	return false;
}
static inline void efi_find_mirror(void)
{
}
static inline void efi_reserve_boot_services(void)
{
}
#endif /* CONFIG_EFI */

#ifdef CONFIG_EFI_FAKE_MEMMAP
extern void __init efi_fake_memmap_early(void);
#else
static inline void efi_fake_memmap_early(void)
{
}
#endif

#define arch_ima_efi_boot_mode \
	({ extern struct boot_params boot_params; boot_params.secure_boot; })

#endif /* _ASM_X86_EFI_H */