xref: /openbmc/linux/arch/x86/include/asm/fixmap.h (revision c482feef)
1 /*
2  * fixmap.h: compile-time virtual memory allocation
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1998 Ingo Molnar
9  *
10  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11  * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
12  */
13 
14 #ifndef _ASM_X86_FIXMAP_H
15 #define _ASM_X86_FIXMAP_H
16 
17 #ifndef __ASSEMBLY__
18 #include <linux/kernel.h>
19 #include <asm/acpi.h>
20 #include <asm/apicdef.h>
21 #include <asm/page.h>
22 #ifdef CONFIG_X86_32
23 #include <linux/threads.h>
24 #include <asm/kmap_types.h>
25 #else
26 #include <uapi/asm/vsyscall.h>
27 #endif
28 
/*
 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
 * uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
 * Because of this, FIXADDR_TOP x86 integration was left as later work.
 */
#ifdef CONFIG_X86_32
/* used by vmalloc.c, vsyscall.lds.S.
 *
 * Leave one empty page between vmalloc'ed areas and
 * the start of the fixmap.
 */
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
#else
/*
 * x86_64: round the end of the vsyscall page up to the next PMD
 * boundary, then step back one page, so the fixmap sits just below
 * the vsyscall mapping.
 */
#define FIXADDR_TOP	(round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
			 PAGE_SIZE)
#endif
46 
47 /*
48  * cpu_entry_area is a percpu region in the fixmap that contains things
49  * needed by the CPU and early entry/exit code.  Real types aren't used
50  * for all fields here to avoid circular header dependencies.
51  *
52  * Every field is a virtual alias of some other allocated backing store.
53  * There is no direct allocation of a struct cpu_entry_area.
54  */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
	 * a read-only guard page.
	 */
	struct SYSENTER_stack_page SYSENTER_stack_page;

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

	/* One page of kernel entry code remapped here -- NOTE(review):
	 * presumably an alias of the entry text; confirm against the
	 * entry-area setup code. */
	char entry_trampoline[PAGE_SIZE];

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries.
	 *
	 * In the future, this should have a separate slot for each stack
	 * with guard pages between them.
	 */
	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
#endif
};
82 
/*
 * Size of one CPU's entry area in pages; sizeof(struct cpu_entry_area)
 * is required to be an exact multiple of PAGE_SIZE (enforced by a
 * BUILD_BUG_ON in __get_cpu_entry_area_page_index()).
 */
#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)

/* Set up the per-CPU entry-area fixmap mappings (defined in arch code). */
extern void setup_cpu_entry_areas(void);
86 
87 /*
88  * Here we define all the compile-time 'special' virtual
89  * addresses. The point is to have a constant address at
90  * compile time, but to set the physical address only
91  * in the boot process.
92  * for x86_32: We allocate these special addresses
93  * from the end of virtual memory (0xfffff000) backwards.
94  * Also this lets us do fail-safe vmalloc(), we
95  * can guarantee that these special addresses and
96  * vmalloc()-ed addresses never overlap.
97  *
98  * These 'compile-time allocated' memory buffers are
99  * fixed-size 4k pages (or larger if used with an increment
100  * higher than 1). Use set_fixmap(idx,phys) to associate
101  * physical memory with fixmap indices.
102  *
103  * TLB entries of such buffers will not be flushed across
104  * task switches.
105  */
enum fixed_addresses {
#ifdef CONFIG_X86_32
	FIX_HOLE,
#else
#ifdef CONFIG_X86_VSYSCALL_EMULATION
	/* Index chosen so this slot maps at the fixed VSYSCALL_ADDR. */
	VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
#endif
#endif
	FIX_DBGP_BASE,
	FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	FIX_OHCI1394_BASE,
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
	FIX_IO_APIC_BASE_0,
	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
	FIX_RO_IDT,	/* Virtual mapping for read-only IDT */
#ifdef CONFIG_X86_32
	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
	FIX_PCIE_MCFG,
#endif
#endif
#ifdef CONFIG_PARAVIRT
	FIX_PARAVIRT_BOOTMAP,
#endif
	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
#ifdef	CONFIG_X86_INTEL_MID
	FIX_LNW_VRTC,
#endif
	/* Fixmap entries to remap the GDTs, one per processor. */
	FIX_CPU_ENTRY_AREA_TOP,
	FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1,

#ifdef CONFIG_ACPI_APEI_GHES
	/* Used for GHES mapping from assorted contexts */
	FIX_APEI_GHES_IRQ,
	FIX_APEI_GHES_NMI,
#endif

	__end_of_permanent_fixed_addresses,

	/*
	 * 512 temporary boot-time mappings, used by early_ioremap(),
	 * before ioremap() is functional.
	 *
	 * If necessary we round it up to the next 512 pages boundary so
	 * that we can have a single pgd entry and a single pte table:
	 */
#define NR_FIX_BTMAPS		64
#define FIX_BTMAPS_SLOTS	8
#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
	/*
	 * If the BTMAP range starting at __end_of_permanent_fixed_addresses
	 * would cross a PTRS_PER_PTE boundary (the XOR of the endpoints has
	 * bits set at or above that boundary), round its start up to the
	 * next TOTAL_FIX_BTMAPS multiple; otherwise start right after the
	 * permanent entries.
	 */
	FIX_BTMAP_END =
	 (__end_of_permanent_fixed_addresses ^
	  (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) &
	 -PTRS_PER_PTE
	 ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS -
	   (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
	 : __end_of_permanent_fixed_addresses,
	/* END has the lowest index; fixmap indices grow downward in VA. */
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
#ifdef CONFIG_X86_32
	FIX_WP_TEST,
#endif
#ifdef CONFIG_INTEL_TXT
	FIX_TBOOT_BASE,
#endif
	__end_of_fixed_addresses
};
180 
181 
/*
 * Carve 'reserve' bytes off the top of the fixmap address space --
 * NOTE(review): presumably adjusts __FIXADDR_TOP on x86_32; confirm
 * against the definition in arch/x86/mm.
 */
extern void reserve_top_address(unsigned long reserve);

/* The permanent fixmap entries occupy [FIXADDR_START, FIXADDR_TOP]. */
#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)

extern int fixmaps_set;

/* Highmem kmap support -- NOTE(review): only meaningful on x86_32. */
extern pte_t *kmap_pte;
#define kmap_prot PAGE_KERNEL
extern pte_t *pkmap_page_table;

/* Install a raw pte into fixmap slot @idx. */
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
/* Map physical address @phys at fixmap slot @idx with protection @flags. */
void native_set_fixmap(enum fixed_addresses idx,
		       phys_addr_t phys, pgprot_t flags);
196 
#ifndef CONFIG_PARAVIRT
/*
 * Without paravirt, fixmap updates go straight to the native
 * implementation; with CONFIG_PARAVIRT the paravirt layer supplies
 * its own __set_fixmap().
 */
static inline void __set_fixmap(enum fixed_addresses idx,
				phys_addr_t phys, pgprot_t flags)
{
	native_set_fixmap(idx, phys, flags);
}
#endif
204 
205 /*
206  * FIXMAP_PAGE_NOCACHE is used for MMIO. Memory encryption is not
207  * supported for MMIO addresses, so make sure that the memory encryption
208  * mask is not part of the page attributes.
209  */
210 #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_IO_NOCACHE
211 
212 /*
213  * Early memremap routines used for in-place encryption. The mappings created
214  * by these routines are intended to be used as temporary mappings.
215  */
216 void __init *early_memremap_encrypted(resource_size_t phys_addr,
217 				      unsigned long size);
218 void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
219 					 unsigned long size);
220 void __init *early_memremap_decrypted(resource_size_t phys_addr,
221 				      unsigned long size);
222 void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
223 					 unsigned long size);
224 
225 #include <asm-generic/fixmap.h>
226 
/* After early boot, "late" fixmap updates are plain __set_fixmap() calls. */
#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
/* Clearing maps phys 0 with empty pgprot, i.e. a not-present entry. */
#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0))

/* Early-boot variant, usable before the normal fixmap machinery is up. */
void __early_set_fixmap(enum fixed_addresses idx,
			phys_addr_t phys, pgprot_t flags);
232 
233 static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page)
234 {
235 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
236 
237 	return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page;
238 }
239 
/*
 * Translate a byte offset within struct cpu_entry_area into the fixmap
 * page index of that offset for CPU @cpu.  The offset must be
 * page-aligned, which is enforced at compile time.
 *
 * Macro parameters are parenthesized so that expression arguments
 * (e.g. "a + b") bind correctly against % and /.
 */
#define __get_cpu_entry_area_offset_index(cpu, offset) ({		\
	BUILD_BUG_ON((offset) % PAGE_SIZE != 0);			\
	__get_cpu_entry_area_page_index((cpu), (offset) / PAGE_SIZE);	\
	})

/* Fixmap index of a given cpu_entry_area field, for a given CPU. */
#define get_cpu_entry_area_index(cpu, field)				\
	__get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))
247 
/*
 * Return the virtual base address of @cpu's entry area: the fixmap
 * address of page 0 of that CPU's slot.
 */
static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned int first_page = __get_cpu_entry_area_page_index(cpu, 0);

	return (struct cpu_entry_area *)__fix_to_virt(first_page);
}
252 
253 static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
254 {
255 	return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
256 }
257 
258 #endif /* !__ASSEMBLY__ */
259 #endif /* _ASM_X86_FIXMAP_H */
260