#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/i387.h>
/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP		EFI_ARCH_1

#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#ifdef CONFIG_X86_32

extern unsigned long asmlinkage efi_call_phys(void *, ...);

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */

/* Use this macro if your virtual call returns a non-void value */
#define efi_call_virt(f, args...)					\
({									\
	efi_status_t __s;						\
	kernel_fpu_begin();						\
	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
	__s;								\
})

/* Use this macro if your virtual call does not return any value */
#define __efi_call_virt(f, args...)					\
({									\
	kernel_fpu_begin();						\
	((efi_##f##_t __attribute__((regparm(0)))*)			\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
})

#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern u64 asmlinkage efi_call(void *fp, ...);

#define efi_call_phys(f, args...)		efi_call((f), args)

#define efi_call_virt(f, ...)						\
({									\
	efi_status_t __s;						\
									\
	efi_sync_low_kernel_mappings();					\
	preempt_disable();						\
	__kernel_fpu_begin();						\
	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
	__kernel_fpu_end();						\
	preempt_enable();						\
	__s;								\
})

/*
 * All X86_64 virt calls return non-void values. Thus, use non-void call for
 * virt calls that would be void on X86_32.
 */
#define __efi_call_virt(f, args...) efi_call_virt(f, args)
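
/*
 * Illustrative only: a runtime service wrapper is expected to invoke the
 * macros above with the *name* of the efi_runtime_services_t member, which
 * efi_call_virt() token-pastes into the matching efi_<name>_t function
 * pointer type. A minimal sketch (the wrapper name follows the generic
 * runtime wrappers and is not declared in this header):
 *
 *	static efi_status_t virt_efi_get_time(efi_time_t *tm,
 *					      efi_time_cap_t *tc)
 *	{
 *		return efi_call_virt(get_time, tm, tc);
 *	}
 *
 * On X86_64 this also syncs the low kernel mappings and brackets the call
 * with preempt_disable()/__kernel_fpu_begin(), as defined above.
 */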

extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
				 u32 type, u64 attribute);

#endif /* CONFIG_X86_32 */

extern int add_efi_memmap;
extern struct efi_scratch efi_scratch;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);

struct efi_setup_data {
	u64 fw_vendor;
	u64 runtime;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
{
	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (efi_is_native())
		return true;

	if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
		return true;

	return false;
}

extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);

#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */

extern bool efi_reboot_required(void);

#else
/*
 * If EFI is not configured, have the EFI calls return -ENOSYS.
 */
#define efi_call0(_f) (-ENOSYS)
#define efi_call1(_f, _a1) (-ENOSYS)
#define efi_call2(_f, _a1, _a2) (-ENOSYS)
#define efi_call3(_f, _a1, _a2, _a3) (-ENOSYS)
#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */
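
/*
 * Illustrative only, not part of the original header: callers such as the
 * x86 EFI init path are expected to gate runtime setup on the helpers
 * declared above, roughly along these lines (efi_runtime_init() here stands
 * in for the arch runtime mapping code and is not declared in this header):
 *
 *	if (!efi_runtime_supported())
 *		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
 *	else if (efi_runtime_init())
 *		return;
 *
 * With CONFIG_EFI_MIXED, a 64-bit kernel on 32-bit firmware still reports
 * runtime support unless the EFI_OLD_MEMMAP (efi=old_map) fallback is active.
 */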