/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <linux/build_bug.h>

extern unsigned long efi_fw_vendor, efi_config_table;

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * SGI UV1 machines are known to be incompatible with this scheme, so we
 * provide an opt-out for these machines via a DMI quirk that sets the
 * attribute below.
 */
#define EFI_UV1_MEMMAP          EFI_ARCH_1

static inline bool efi_have_uv1_memmap(void)
{
        return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP);
}

#define EFI32_LOADER_SIGNATURE  "EL32"
#define EFI64_LOADER_SIGNATURE  "EL64"

#define ARCH_EFI_IRQ_FLAGS_MASK X86_EFLAGS_IF

/*
 * The EFI services are called through variadic functions in many cases. These
 * functions are implemented in assembler and support only a fixed number of
 * arguments. The macros below allow us to check at build time that we don't
 * try to call them with too many arguments.
 *
 * __efi_nargs() will return the number of arguments if it is 7 or less, and
 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 * impossible to calculate the exact number of arguments beyond some
 * pre-defined limit. The maximum number of arguments currently supported by
 * any of the thunks is 7, so this is good enough for now and can be extended
 * in the obvious way if we ever need more.
 */
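
/*
 * For illustration: with the sentinel trick implemented below, the argument
 * count is simply read off a shifted argument list, e.g.
 *
 *      __efi_nargs()                   evaluates to 0
 *      __efi_nargs(a, b, c)            evaluates to 3
 *      __efi_nargs(a1, ..., a8)        (eight or more arguments) expands to
 *                                      the BUILD_BUG_ON_MSG() branch and
 *                                      breaks the build
 */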
#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,      \
        __efi_arg_sentinel(7), __efi_arg_sentinel(6),          \
        __efi_arg_sentinel(5), __efi_arg_sentinel(4),          \
        __efi_arg_sentinel(3), __efi_arg_sentinel(2),          \
        __efi_arg_sentinel(1), __efi_arg_sentinel(0))
#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, n, ...)  \
        __take_second_arg(n,                                   \
                ({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 8; }))
#define __efi_arg_sentinel(n) , n

/*
 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 * represents more than n arguments.
 */

#define __efi_nargs_check(f, n, ...)                                    \
        __efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
#define __efi_nargs_check__(f, p, n) ({                                 \
        BUILD_BUG_ON_MSG(                                               \
                (p) > (n),                                              \
                #f " called with too many arguments (" #p ">" #n ")");  \
})
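
/*
 * For illustration: efi_call() and efi64_thunk() below use this check to
 * bound their argument lists. efi64_thunk() allows at most six arguments,
 * so passing seven, e.g.
 *
 *      efi64_thunk(fp, a1, a2, a3, a4, a5, a6)
 *
 * fails to build with "efi64_thunk called with too many arguments (7>6)".
 */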

#ifdef CONFIG_X86_32
#define arch_efi_call_virt_setup()                              \
({                                                              \
        kernel_fpu_begin();                                     \
        firmware_restrict_branch_speculation_start();           \
})

#define arch_efi_call_virt_teardown()                           \
({                                                              \
        firmware_restrict_branch_speculation_end();             \
        kernel_fpu_end();                                       \
})

#define arch_efi_call_virt(p, f, args...)       p->f(args)

#define efi_ioremap(addr, size, type, attr)     ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE    "EL64"

extern asmlinkage u64 __efi_call(void *fp, ...);

#define efi_call(...) ({                                        \
        __efi_nargs_check(efi_call, 7, __VA_ARGS__);            \
        __efi_call(__VA_ARGS__);                                \
})

/*
 * struct efi_scratch - Scratch space used while switching to/from efi_mm
 * @phys_stack: stack used during EFI Mixed Mode
 * @prev_mm:    store/restore stolen mm_struct while switching to/from efi_mm
 */
struct efi_scratch {
        u64                     phys_stack;
        struct mm_struct        *prev_mm;
} __packed;

#define arch_efi_call_virt_setup()                              \
({                                                              \
        efi_sync_low_kernel_mappings();                         \
        kernel_fpu_begin();                                     \
        firmware_restrict_branch_speculation_start();           \
                                                                \
        if (!efi_have_uv1_memmap())                             \
                efi_switch_mm(&efi_mm);                         \
})

#define arch_efi_call_virt(p, f, args...)                       \
        efi_call((void *)p->f, args)

#define arch_efi_call_virt_teardown()                           \
({                                                              \
        if (!efi_have_uv1_memmap())                             \
                efi_switch_mm(efi_scratch.prev_mm);             \
                                                                \
        firmware_restrict_branch_speculation_end();             \
        kernel_fpu_end();                                       \
})
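
/*
 * Sketch of how the hooks above are used; the actual wrapper is the generic
 * efi_call_virt_pointer() in include/linux/efi.h, which does roughly:
 *
 *      arch_efi_call_virt_setup();
 *      status = arch_efi_call_virt(p, f, args);
 *      arch_efi_call_virt_teardown();
 *
 * so the FPU region, the speculation restriction and the switch to efi_mm
 * only bracket the firmware call itself.
 */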

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
                                        u32 type, u64 attribute);

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset. The __memset function is
 * present only in the kernel binary. Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset(). So we should use the standard
 * memset from arch/x86/boot/compressed/string.c. The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);

/* kexec external ABI */
struct efi_setup_data {
        u64 fw_vendor;
        u64 __unused;
        u64 tables;
        u64 smbios;
        u64 reserved[8];
};

extern u64 efi_setup;
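
/*
 * struct efi_setup_data is handed to a kexec'd kernel via a SETUP_EFI
 * setup_data entry in boot_params; parse_efi_setup() records its physical
 * address in efi_setup so that EFI initialization can reuse the previous
 * kernel's firmware configuration (fw_vendor, config tables, SMBIOS).
 */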

#ifdef CONFIG_EFI
extern efi_status_t __efi64_thunk(u32, ...);

#define efi64_thunk(...) ({                                     \
        __efi_nargs_check(efi64_thunk, 6, __VA_ARGS__);         \
        __efi64_thunk(__VA_ARGS__);                             \
})

static inline bool efi_is_mixed(void)
{
        if (!IS_ENABLED(CONFIG_EFI_MIXED))
                return false;
        return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
        if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
                return true;

        return IS_ENABLED(CONFIG_EFI_MIXED);
}

extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);

extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
                                         unsigned long descriptor_size,
                                         u32 descriptor_version,
                                         efi_memory_desc_t *virtual_map,
                                         unsigned long systab_phys);

/* arch specific definitions used by the stub code */

#ifdef CONFIG_EFI_MIXED

#define ARCH_HAS_EFISTUB_WRAPPERS

static inline bool efi_is_64bit(void)
{
        extern const bool efi_is64;

        return efi_is64;
}

static inline bool efi_is_native(void)
{
        if (!IS_ENABLED(CONFIG_X86_64))
                return true;
        return efi_is_64bit();
}

#define efi_mixed_mode_cast(attr)                                       \
        __builtin_choose_expr(                                          \
                __builtin_types_compatible_p(u32, __typeof__(attr)),    \
                        (unsigned long)(attr), (attr))

#define efi_table_attr(inst, attr)                                      \
        (efi_is_native()                                                \
                ? inst->attr                                            \
                : (__typeof__(inst->attr))                              \
                        efi_mixed_mode_cast(inst->mixed_mode.attr))
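
/*
 * For illustration: in mixed mode,
 *
 *      efi_table_attr(efi_system_table, boottime)
 *
 * reads the 32-bit efi_system_table->mixed_mode.boottime field, widens it
 * through efi_mixed_mode_cast() and casts it back to the type of
 * efi_system_table->boottime, i.e. a native pointer to the boot services
 * table.
 */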

/*
 * The following macros allow translating arguments if necessary from native to
 * mixed mode. The use cases for this are to initialize the upper 32 bits of
 * output parameters, and to split 64-bit arguments into the two 32-bit halves
 * that the 32-bit method expects, so that they can be thunked properly.
 *
 * As examples, the AllocatePool boot service returns the address of the
 * allocation, but it will not set the high 32 bits of the address. To ensure
 * that the full 64-bit address is initialized, we zero-init the address before
 * calling the thunk.
 *
 * The FreePages boot service takes a 64-bit physical address even in 32-bit
 * mode. For the thunk to work correctly, a native 64-bit call of
 *   free_pages(addr, size)
 * must be translated to
 *   efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
 * so that the two 32-bit halves of addr get pushed onto the stack separately.
 */

static inline void *efi64_zero_upper(void *p)
{
        ((u32 *)p)[1] = 0;
        return p;
}

static inline u32 efi64_convert_status(efi_status_t status)
{
        return (u32)(status | (u64)status >> 32);
}
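
/*
 * For illustration: EFI error codes only differ in the position of the error
 * bit (the top bit of the status word), so folding the upper half into the
 * lower one maps a native status onto its 32-bit encoding, e.g.
 *
 *      EFI_SUCCESS             0x0000000000000000 -> 0x00000000
 *      EFI_INVALID_PARAMETER   0x8000000000000002 -> 0x80000002
 */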

#define __efi64_argmap_free_pages(addr, size)                           \
        ((addr), 0, (size))

#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)      \
        ((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))

#define __efi64_argmap_allocate_pool(type, size, buffer)                \
        ((type), (size), efi64_zero_upper(buffer))

#define __efi64_argmap_handle_protocol(handle, protocol, interface)     \
        ((handle), (protocol), efi64_zero_upper(interface))

#define __efi64_argmap_locate_protocol(protocol, reg, interface)        \
        ((protocol), (reg), efi64_zero_upper(interface))

#define __efi64_argmap_locate_device_path(protocol, path, handle)       \
        ((protocol), (path), efi64_zero_upper(handle))

#define __efi64_argmap_exit(handle, status, size, data)                 \
        ((handle), efi64_convert_status(status), (size), (data))

/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)      \
        ((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),      \
         efi64_zero_upper(dev), efi64_zero_upper(func))

/* LoadFile */
#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf)  \
        ((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))

/* Graphics Output Protocol */
#define __efi64_argmap_query_mode(gop, mode, size, info)                \
        ((gop), (mode), efi64_zero_upper(size), efi64_zero_upper(info))

/*
 * The macros below handle the plumbing for the argument mapping. To add a
 * mapping for a specific EFI method, simply define a macro
 * __efi64_argmap_<method name>, following the examples above.
 */

#define __efi64_thunk_map(inst, func, ...)                              \
        efi64_thunk(inst->mixed_mode.func,                              \
                __efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),    \
                               (__VA_ARGS__)))

#define __efi64_argmap(mapped, args)                                    \
        __PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
#define __efi64_argmap__0(mapped, args) __efi_eval mapped
#define __efi64_argmap__1(mapped, args) __efi_eval args

#define __efi_eat(...)
#define __efi_eval(...) __VA_ARGS__

/* The three macros below handle dispatching via the thunk if needed */

#define efi_call_proto(inst, func, ...)                                 \
        (efi_is_native()                                                \
                ? inst->func(inst, ##__VA_ARGS__)                       \
                : __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))

#define efi_bs_call(func, ...)                                          \
        (efi_is_native()                                                \
                ? efi_system_table->boottime->func(__VA_ARGS__)         \
                : __efi64_thunk_map(efi_table_attr(efi_system_table,    \
                                                   boottime),           \
                                    func, __VA_ARGS__))

#define efi_rt_call(func, ...)                                          \
        (efi_is_native()                                                \
                ? efi_system_table->runtime->func(__VA_ARGS__)          \
                : __efi64_thunk_map(efi_table_attr(efi_system_table,    \
                                                   runtime),            \
                                    func, __VA_ARGS__))
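
/*
 * For illustration (using the allocate_pool mapping above, with size and buf
 * being locals at the call site): a native call
 *
 *      efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, &buf);
 *
 * dispatches directly to efi_system_table->boottime->allocate_pool(), while
 * in mixed mode it becomes roughly
 *
 *      efi64_thunk(efi_table_attr(efi_system_table, boottime)
 *                      ->mixed_mode.allocate_pool,
 *                  EFI_LOADER_DATA, size, efi64_zero_upper(&buf));
 *
 * For services without an __efi64_argmap_<name> mapping, __efi64_argmap()
 * falls back to passing the argument list through unchanged.
 */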

#else /* CONFIG_EFI_MIXED */

static inline bool efi_is_64bit(void)
{
        return IS_ENABLED(CONFIG_X86_64);
}

#endif /* CONFIG_EFI_MIXED */

extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);

extern void efi_find_mirror(void);
extern void efi_reserve_boot_services(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
        return false;
}
static inline bool efi_is_table_address(unsigned long phys_addr)
{
        return false;
}
static inline void efi_find_mirror(void)
{
}
static inline void efi_reserve_boot_services(void)
{
}
#endif /* CONFIG_EFI */

#ifdef CONFIG_EFI_FAKE_MEMMAP
extern void __init efi_fake_memmap_early(void);
#else
static inline void efi_fake_memmap_early(void)
{
}
#endif

#endif /* _ASM_X86_EFI_H */