xref: /openbmc/linux/arch/x86/include/asm/fixmap.h (revision ed1bbc40)
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Ingo Molnar
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
 */

#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H

#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#ifdef CONFIG_X86_32
#include <linux/threads.h>
#include <asm/kmap_types.h>
#else
#include <uapi/asm/vsyscall.h>
#endif
#include <asm/cpu_entry_area.h>

/*
 * We can't declare FIXADDR_TOP as a variable for x86_64 because vsyscall
 * uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
 * Because of this, unifying FIXADDR_TOP across x86 was left for later work.
 */
#ifdef CONFIG_X86_32
/*
 * Used by vmalloc.c and vsyscall.lds.S.
 *
 * Leave one empty page between vmalloc'ed areas and
 * the start of the fixmap.
 */
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
#else
#define FIXADDR_TOP	(round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
			 PAGE_SIZE)
#endif
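
/*
 * Illustrative arithmetic for the x86_64 branch above (a sketch, assuming
 * the usual values VSYSCALL_ADDR == 0xffffffffff600000 and PMD_SHIFT == 21):
 *
 *	FIXADDR_TOP = round_up(0xffffffffff601000, 0x200000) - 0x1000
 *	            = 0xffffffffff7ff000
 *
 * which also makes VSYSCALL_PAGE in the enum below equal to
 * (0xffffffffff7ff000 - 0xffffffffff600000) >> 12 == 511, i.e. the last
 * slot of a 512-entry pte page.
 */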

/*
 * Here we define all the compile-time 'special' virtual addresses.
 * The point is to have a constant address at compile time, but to
 * set the physical address only in the boot process.
 *
 * For x86_32: we allocate these special addresses from the end of
 * virtual memory (0xfffff000) backwards.  This also lets us do
 * fail-safe vmalloc(): we can guarantee that these special addresses
 * and vmalloc()-ed addresses never overlap.
 *
 * These 'compile-time allocated' memory buffers are fixed-size 4k
 * pages (or larger if used with an increment higher than 1).
 * Use set_fixmap(idx,phys) to associate physical memory with fixmap
 * indices.
 *
 * TLB entries of such buffers will not be flushed across
 * task switches.
 */
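
/*
 * Illustrative usage (not wired up here; the physical address and the
 * chosen slot are arbitrary).  The generic helpers from
 * <asm-generic/fixmap.h>, included further down, turn an index from the
 * enum below into its constant virtual address:
 *
 *	set_fixmap(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK);
 *	virt = fix_to_virt(FIX_EARLYCON_MEM_BASE) + (phys & ~PAGE_MASK);
 *	...
 *	clear_fixmap(FIX_EARLYCON_MEM_BASE);
 */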
enum fixed_addresses {
#ifdef CONFIG_X86_32
	FIX_HOLE,
#else
#ifdef CONFIG_X86_VSYSCALL_EMULATION
	VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
#endif
#endif
	FIX_DBGP_BASE,
	FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	FIX_OHCI1394_BASE,
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
	FIX_IO_APIC_BASE_0,
	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
	FIX_RO_IDT,	/* Virtual mapping for read-only IDT */
#ifdef CONFIG_X86_32
	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
	FIX_PCIE_MCFG,
#endif
#endif
#ifdef CONFIG_PARAVIRT
	FIX_PARAVIRT_BOOTMAP,
#endif
	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
#ifdef	CONFIG_X86_INTEL_MID
	FIX_LNW_VRTC,
#endif
	/* Fixmap entries to remap the GDTs, one per processor. */
	FIX_CPU_ENTRY_AREA_TOP,
	FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1,

#ifdef CONFIG_ACPI_APEI_GHES
	/* Used for GHES mapping from assorted contexts */
	FIX_APEI_GHES_IRQ,
	FIX_APEI_GHES_NMI,
#endif

	__end_of_permanent_fixed_addresses,

	/*
	 * 512 temporary boot-time mappings, used by early_ioremap(),
	 * before ioremap() is functional.
	 *
	 * If necessary we round it up to the next 512-page boundary so
	 * that we can have a single pgd entry and a single pte table
	 * (a worked example follows this enum):
	 */
#define NR_FIX_BTMAPS		64
#define FIX_BTMAPS_SLOTS	8
#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
	FIX_BTMAP_END =
	 (__end_of_permanent_fixed_addresses ^
	  (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) &
	 -PTRS_PER_PTE
	 ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS -
	   (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
	 : __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
#ifdef CONFIG_X86_32
	FIX_WP_TEST,
#endif
#ifdef CONFIG_INTEL_TXT
	FIX_TBOOT_BASE,
#endif
	__end_of_fixed_addresses
};
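
/*
 * Worked example of the FIX_BTMAP_END rounding above, with made-up numbers:
 * if __end_of_permanent_fixed_addresses were 1029, the 512-slot range
 * 1029..1540 would straddle a PTRS_PER_PTE (512) boundary; the XOR test
 * catches that and FIX_BTMAP_END becomes 1029 + 512 - (1029 & 511) = 1536,
 * so the whole early_ioremap() window fits in a single pte table.  The
 * slots themselves are consumed through the usual boot-time helpers,
 * roughly:
 *
 *	p = early_ioremap(phys, len);
 *	...
 *	early_iounmap(p, len);
 */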

extern void reserve_top_address(unsigned long reserve);

#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
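
/*
 * For orientation (relying on the generic __fix_to_virt() pulled in from
 * <asm-generic/fixmap.h> below): __fix_to_virt(idx) is
 * FIXADDR_TOP - (idx << PAGE_SHIFT), so index 0 maps the page at
 * FIXADDR_TOP and higher indices grow downwards towards FIXADDR_START.
 */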

extern int fixmaps_set;

extern pte_t *kmap_pte;
#define kmap_prot PAGE_KERNEL
extern pte_t *pkmap_page_table;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
		       phys_addr_t phys, pgprot_t flags);

#ifndef CONFIG_PARAVIRT
static inline void __set_fixmap(enum fixed_addresses idx,
				phys_addr_t phys, pgprot_t flags)
{
	native_set_fixmap(idx, phys, flags);
}
#endif

/*
 * FIXMAP_PAGE_NOCACHE is used for MMIO. Memory encryption is not
 * supported for MMIO addresses, so make sure that the memory encryption
 * mask is not part of the page attributes.
 */
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_IO_NOCACHE
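
/*
 * Assuming the usual shape of the generic helper in <asm-generic/fixmap.h>,
 * a call such as
 *
 *	set_fixmap_nocache(FIX_DBGP_BASE, phys);
 *
 * expands to __set_fixmap(FIX_DBGP_BASE, phys, FIXMAP_PAGE_NOCACHE), i.e.
 * an uncached, unencrypted MMIO mapping (illustrative only).
 */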

/*
 * Early memremap routines used for in-place encryption. The mappings created
 * by these routines are intended to be used as temporary mappings.
 */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size);
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size);
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size);
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size);
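
/*
 * Rough usage sketch for the helpers above (illustrative only, loosely
 * modelled on the in-place SME conversion path): map the current view of a
 * range write-protected, map the target view writable, copy across (the
 * real code goes through a bounce buffer since both views alias the same
 * physical pages), then tear the temporary mappings down:
 *
 *	src = early_memremap_encrypted_wp(paddr, len);
 *	dst = early_memremap_decrypted(paddr, len);
 *	memcpy(buf, src, len);
 *	memcpy(dst, buf, len);
 *	early_memunmap(dst, len);
 *	early_memunmap(src, len);
 */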

#include <asm-generic/fixmap.h>

#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0))

void __early_set_fixmap(enum fixed_addresses idx,
			phys_addr_t phys, pgprot_t flags);

static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page)
{
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return FIX_CPU_ENTRY_AREA_BOTTOM - cpu * CPU_ENTRY_AREA_PAGES - page;
}

#define __get_cpu_entry_area_offset_index(cpu, offset) ({		\
	BUILD_BUG_ON(offset % PAGE_SIZE != 0);				\
	__get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE);	\
	})

#define get_cpu_entry_area_index(cpu, field)				\
	__get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))

static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0));
}

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
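
/*
 * Index arithmetic sketch for the accessors above: CPU N's page 0 lives at
 * fixmap index FIX_CPU_ENTRY_AREA_BOTTOM - N * CPU_ENTRY_AREA_PAGES, so
 *
 *	get_cpu_entry_area(1)
 *
 * is __fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM - CPU_ENTRY_AREA_PAGES), and
 * cpu_entry_stack(1) is the entry_stack_page.stack member inside that
 * per-CPU area.
 */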

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_H */