xref: /openbmc/linux/arch/x86/include/asm/efi.h (revision ef2b56df)
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP		EFI_ARCH_1
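/*
 * Illustrative sketch, not part of the original header: the window
 * described above is handed out top-down from -4G towards -68G, which
 * is where the 64G figure comes from.  The bounding constants are
 * defined elsewhere in the x86 EFI code, roughly as:
 *
 *	#define EFI_VA_START	( -4 * (1UL << 30))	// 0xffffffff00000000
 *	#define EFI_VA_END	(-68 * (1UL << 30))	// 0xffffffef00000000
 *
 * Because the allocator always starts from EFI_VA_START and preserves
 * alignment, a kexec'd kernel can recompute the same virtual addresses
 * for the runtime regions.
 */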

#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define MAX_CMDLINE_ADDRESS	UINT_MAX

#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

#ifdef CONFIG_X86_32

extern asmlinkage unsigned long efi_call_phys(void *, ...);

#define arch_efi_call_virt_setup()	kernel_fpu_begin()
#define arch_efi_call_virt_teardown()	kernel_fpu_end()

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */
#define arch_efi_call_virt(p, f, args...)				\
({									\
	((efi_##f##_t __attribute__((regparm(0)))*) p->f)(args);	\
})
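/*
 * Illustrative expansion, not in the original header: a call such as
 *
 *	arch_efi_call_virt(efi.systab->runtime, get_time, &tm, &tc);
 *
 * expands, roughly, to
 *
 *	((efi_get_time_t __attribute__((regparm(0)))*)
 *		efi.systab->runtime->get_time)(&tm, &tc);
 *
 * regparm(0) disables the ia32 register-argument convention, so all
 * arguments are pushed on the stack, which is what 32-bit UEFI
 * firmware expects.
 */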

#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern asmlinkage u64 efi_call(void *fp, ...);

#define efi_call_phys(f, args...)		efi_call((f), args)

/*
 * Scratch space used for switching the pagetable in the EFI stub
 */
struct efi_scratch {
	u64	r15;		/* scratch slot historically used by the asm call stub */
	u64	prev_cr3;	/* CR3 saved before and restored after a runtime call */
	pgd_t	*efi_pgt;	/* EFI page table loaded into CR3 while use_pgd is set */
	bool	use_pgd;	/* whether to switch to efi_pgt for runtime calls */
	u64	phys_stack;	/* 1:1-mapped stack used by the mixed-mode thunking code */
} __packed;

#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	preempt_disable();						\
	__kernel_fpu_begin();						\
									\
	if (efi_scratch.use_pgd) {					\
		efi_scratch.prev_cr3 = __read_cr3();			\
		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
		__flush_tlb_all();					\
	}								\
})

#define arch_efi_call_virt(p, f, args...)				\
	efi_call((void *)p->f, args)					\

#define arch_efi_call_virt_teardown()					\
({									\
	if (efi_scratch.use_pgd) {					\
		write_cr3(efi_scratch.prev_cr3);			\
		__flush_tlb_all();					\
	}								\
									\
	__kernel_fpu_end();						\
	preempt_enable();						\
})
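/*
 * Illustrative sketch, not part of this header: the generic runtime
 * wrappers pair these hooks around every firmware call, roughly along
 * the lines of the efi_call_virt_pointer() helper in include/linux/efi.h:
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(efi.systab->runtime, set_time, tm);
 *	arch_efi_call_virt_teardown();
 *
 * i.e. FPU state is saved, preemption is disabled and the EFI page table
 * is installed before the call, and everything is undone once the
 * firmware returns.
 */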

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
					u32 type, u64 attribute);

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset.  The __memset function is
 * present only in the kernel binary.  Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset().  So we should use the standard
 * memset from arch/x86/boot/compressed/string.c.  The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif
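/*
 * For illustration only (assumed shape, not quoted from this tree): under
 * KASAN the string helpers are redirected to uninstrumented variants with
 * defines of roughly this form in arch/x86/include/asm/string_64.h:
 *
 *	#define memcpy(dst, src, len)		__memcpy(dst, src, len)
 *	#define memset(s, c, n)			__memset(s, c, n)
 *	#define memmove(dst, src, count)	__memmove(dst, src, count)
 *
 * The #undefs above drop those redirections for the EFI stub, which is
 * built without the kernel's __mem*() implementations.
 */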

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_print_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);

struct efi_setup_data {
	u64 fw_vendor;
	u64 runtime;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
{
	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (efi_is_native())
		return true;

	if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
		return true;

	return false;
}

extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);

#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */


/* arch specific definitions used by the stub code */

struct efi_config {
	u64 image_handle;
	u64 table;
	u64 runtime_services;
	u64 boot_services;
	u64 text_output;
	efi_status_t (*call)(unsigned long, ...);
	bool is64;
} __packed;

__pure const struct efi_config *__efi_early(void);

static inline bool efi_is_64bit(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return false;

	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return true;

	return __efi_early()->is64;
}

#define efi_table_attr(table, attr, instance)				\
	(efi_is_64bit() ?						\
		((table##_64_t *)(unsigned long)instance)->attr :	\
		((table##_32_t *)(unsigned long)instance)->attr)

#define efi_call_proto(protocol, f, instance, ...)			\
	__efi_early()->call(efi_table_attr(protocol, f, instance),	\
		instance, ##__VA_ARGS__)

#define efi_call_early(f, ...)						\
	__efi_early()->call(efi_table_attr(efi_boot_services, f,	\
		__efi_early()->boot_services), __VA_ARGS__)

#define __efi_call_early(f, ...)					\
	__efi_early()->call((unsigned long)f, __VA_ARGS__);

#define efi_call_runtime(f, ...)					\
	__efi_early()->call(efi_table_attr(efi_runtime_services, f,	\
		__efi_early()->runtime_services), __VA_ARGS__)
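/*
 * Illustrative usage (a hedged sketch, not quoted from this header): the
 * boot stub uses these helpers to invoke boot services and protocols
 * without knowing at compile time whether the firmware is 32- or 64-bit,
 * roughly like:
 *
 *	efi_status_t status;
 *	void *buf;
 *
 *	status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &buf);
 *	if (status != EFI_SUCCESS)
 *		return status;
 *
 * efi_table_attr() picks the 32- or 64-bit layout of the boot services
 * table, and __efi_early()->call() dispatches with the matching calling
 * convention.
 */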

extern bool efi_reboot_required(void);

#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */