/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <linux/build_bug.h>

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * SGI UV1 machines are known to be incompatible with this scheme, so we
 * provide an opt-out for these machines via a DMI quirk that sets the
 * attribute below.
 */
#define EFI_UV1_MEMMAP			EFI_ARCH_1

static inline bool efi_have_uv1_memmap(void)
{
	return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP);
}

#define EFI32_LOADER_SIGNATURE		"EL32"
#define EFI64_LOADER_SIGNATURE		"EL64"

#define MAX_CMDLINE_ADDRESS		UINT_MAX

#define ARCH_EFI_IRQ_FLAGS_MASK		X86_EFLAGS_IF

/*
 * The EFI services are called through variadic functions in many cases. These
 * functions are implemented in assembler and support only a fixed number of
 * arguments. The macros below allow us to check at build time that we don't
 * try to call them with too many arguments.
 *
 * __efi_nargs() will return the number of arguments if it is 7 or less, and
 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 * impossible to calculate the exact number of arguments beyond some
 * pre-defined limit. The maximum number of arguments currently supported by
 * any of the thunks is 7, so this is good enough for now and can be extended
 * in the obvious way if we ever need more.
 */
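
/*
 * A rough sketch of the behaviour (illustration only, the argument names are
 * made up):
 *
 *	__efi_nargs()		evaluates to 0
 *	__efi_nargs(a, b, c)	evaluates to 3
 *
 * while passing eight or more arguments hits the BUILD_BUG_ON_MSG() below,
 * so e.g. __efi_nargs_check(efi_call, 7, a1, ..., a8) fails the build.
 */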

#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,	\
	__efi_arg_sentinel(7), __efi_arg_sentinel(6),		\
	__efi_arg_sentinel(5), __efi_arg_sentinel(4),		\
	__efi_arg_sentinel(3), __efi_arg_sentinel(2),		\
	__efi_arg_sentinel(1), __efi_arg_sentinel(0))
#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, n, ...)	\
	__take_second_arg(n,					\
		({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 8; }))
#define __efi_arg_sentinel(n) , n

/*
 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 * represents more than n arguments.
 */

#define __efi_nargs_check(f, n, ...)					\
	__efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
#define __efi_nargs_check__(f, p, n) ({					\
	BUILD_BUG_ON_MSG(						\
		(p) > (n),						\
		#f " called with too many arguments (" #p ">" #n ")");	\
})

#ifdef CONFIG_X86_32
#define arch_efi_call_virt_setup()					\
({									\
	kernel_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
})

#define arch_efi_call_virt_teardown()					\
({									\
	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
})

#define arch_efi_call_virt(p, f, args...)	p->f(args)

#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern asmlinkage u64 __efi_call(void *fp, ...);
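
/*
 * Illustration only (the names below are hypothetical): on native 64-bit, a
 * runtime service ends up being invoked roughly as
 *
 *	status = efi_call((void *)p->get_time, tm, tc);
 *
 * via arch_efi_call_virt() further down. The argument count is validated at
 * build time and the actual transfer is done by the __efi_call() assembler
 * thunk, which handles at most seven arguments.
 */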

#define efi_call(...) ({						\
	__efi_nargs_check(efi_call, 7, __VA_ARGS__);			\
	__efi_call(__VA_ARGS__);					\
})

/*
 * struct efi_scratch - Scratch space used while switching to/from efi_mm
 * @phys_stack:	stack used during EFI Mixed Mode
 * @prev_mm:	store/restore stolen mm_struct while switching to/from efi_mm
 */
struct efi_scratch {
	u64			phys_stack;
	struct mm_struct	*prev_mm;
} __packed;

#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	kernel_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
									\
	if (!efi_have_uv1_memmap())					\
		efi_switch_mm(&efi_mm);					\
})

#define arch_efi_call_virt(p, f, args...)				\
	efi_call((void *)p->f, args)

#define arch_efi_call_virt_teardown()					\
({									\
	if (!efi_have_uv1_memmap())					\
		efi_switch_mm(efi_scratch.prev_mm);			\
									\
	firmware_restrict_branch_speculation_end();			\
	kernel_fpu_end();						\
})
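
/*
 * Sketch of the assumed calling sequence (the generic EFI runtime wrappers
 * under drivers/firmware/efi are the real users of these hooks):
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(p, f, args...);
 *	arch_efi_call_virt_teardown();
 */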

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
					u32 type, u64 attribute);

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset. The __memset function is
 * present only in the kernel binary. Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset(), so we should use the standard
 * memset from arch/x86/boot/compressed/string.c. The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);

struct efi_setup_data {
	u64 fw_vendor;
	u64 runtime;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI
extern efi_status_t __efi64_thunk(u32, ...);
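
/*
 * Mixed-mode sketch (see the FreePages example further down): a 64-bit
 * argument has to be handed to the 32-bit firmware entry point as two 32-bit
 * halves, e.g.
 *
 *	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size);
 *
 * The assembler thunk supports at most six arguments, which the wrapper
 * below checks at build time.
 */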

#define efi64_thunk(...) ({						\
	__efi_nargs_check(efi64_thunk, 6, __VA_ARGS__);			\
	__efi64_thunk(__VA_ARGS__);					\
})

static inline bool efi_is_mixed(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return false;
	return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
		return true;

	return IS_ENABLED(CONFIG_EFI_MIXED);
}

extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);

extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
					 unsigned long descriptor_size,
					 u32 descriptor_version,
					 efi_memory_desc_t *virtual_map);

/* arch specific definitions used by the stub code */

__attribute_const__ bool efi_is_64bit(void);

static inline bool efi_is_native(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return true;
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return true;
	return efi_is_64bit();
}

#define efi_mixed_mode_cast(attr)					\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(u32, __typeof__(attr)),	\
			(unsigned long)(attr), (attr))

#define efi_table_attr(inst, attr)					\
	(efi_is_native()						\
		? inst->attr						\
		: (__typeof__(inst->attr))				\
			efi_mixed_mode_cast(inst->mixed_mode.attr))
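
/*
 * For example (illustration only), efi_bs_call() further down uses
 *
 *	efi_table_attr(efi_system_table(), boottime)
 *
 * to fetch the boot services pointer: natively this is a plain member load,
 * while in mixed mode the 32-bit mixed_mode.boottime value is widened via
 * efi_mixed_mode_cast() and cast back to the native pointer type.
 */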

/*
 * The following macros allow translating arguments if necessary from native to
 * mixed mode. The use case for this is to initialize the upper 32 bits of
 * output parameters, and to split up 64-bit arguments required by the 32-bit
 * method into two 32-bit halves so that they can be thunked properly.
 *
 * As examples, the AllocatePool boot service returns the address of the
 * allocation, but it will not set the high 32 bits of the address. To ensure
 * that the full 64-bit address is initialized, we zero-init the address before
 * calling the thunk.
 *
 * The FreePages boot service takes a 64-bit physical address even in 32-bit
 * mode. For the thunk to work correctly, a native 64-bit call of
 *	free_pages(addr, size)
 * must be translated to
 *	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
 * so that the two 32-bit halves of addr get pushed onto the stack separately.
 */

static inline void *efi64_zero_upper(void *p)
{
	((u32 *)p)[1] = 0;
	return p;
}

#define __efi64_argmap_free_pages(addr, size)				\
	((addr), 0, (size))

#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)	\
	((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))

#define __efi64_argmap_allocate_pool(type, size, buffer)		\
	((type), (size), efi64_zero_upper(buffer))

#define __efi64_argmap_handle_protocol(handle, protocol, interface)	\
	((handle), (protocol), efi64_zero_upper(interface))

#define __efi64_argmap_locate_protocol(protocol, reg, interface)	\
	((protocol), (reg), efi64_zero_upper(interface))

/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)	\
	((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),	\
	 efi64_zero_upper(dev), efi64_zero_upper(func))

/*
 * The macros below handle the plumbing for the argument mapping. To add a
 * mapping for a specific EFI method, simply define a macro
 * __efi64_argmap_<method name>, following the examples above.
 */

#define __efi64_thunk_map(inst, func, ...)				\
	efi64_thunk(inst->mixed_mode.func,				\
		__efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),	\
			       (__VA_ARGS__)))

#define __efi64_argmap(mapped, args)					\
	__PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
#define __efi64_argmap__0(mapped, args) __efi_eval mapped
#define __efi64_argmap__1(mapped, args) __efi_eval args

#define __efi_eat(...)
#define __efi_eval(...) __VA_ARGS__

/* The three macros below handle dispatching via the thunk if needed */

#define efi_call_proto(inst, func, ...)					\
	(efi_is_native()						\
		? inst->func(inst, ##__VA_ARGS__)			\
		: __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))

#define efi_bs_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table()->boottime->func(__VA_ARGS__)	\
		: __efi64_thunk_map(efi_table_attr(efi_system_table(),	\
						   boottime), func, __VA_ARGS__))

#define efi_rt_call(func, ...)						\
	(efi_is_native()						\
		? efi_system_table()->runtime->func(__VA_ARGS__)	\
		: __efi64_thunk_map(efi_table_attr(efi_system_table(),	\
						   runtime), func, __VA_ARGS__))
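
/*
 * A rough expansion sketch (illustration only; size and buf are hypothetical
 * locals). In mixed mode,
 *
 *	efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, &buf)
 *
 * becomes approximately
 *
 *	efi64_thunk(boottime->mixed_mode.allocate_pool,
 *		    (EFI_LOADER_DATA), (size), efi64_zero_upper(&buf));
 *
 * because __efi64_argmap_allocate_pool() is defined above: its expansion
 * starts with '(', so __efi_eat() swallows it, __efi_nargs() yields 0 and the
 * mapped argument list is selected. For a method without an argmap, the
 * unexpanded __efi64_argmap_<func>(...) tokens survive, __efi_nargs() yields
 * 1 and the original arguments are passed through unchanged.
 */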

extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);

extern void efi_find_mirror(void);
extern void efi_reserve_boot_services(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
static inline bool efi_is_table_address(unsigned long phys_addr)
{
	return false;
}
static inline void efi_find_mirror(void)
{
}
static inline void efi_reserve_boot_services(void)
{
}
#endif /* CONFIG_EFI */

#ifdef CONFIG_EFI_FAKE_MEMMAP
extern void __init efi_fake_memmap_early(void);
#else
static inline void efi_fake_memmap_early(void)
{
}
#endif

#endif /* _ASM_X86_EFI_H */