xref: /openbmc/linux/arch/x86/include/asm/mmu_context.h (revision d2c43ff1)
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
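
/*
 * For context: a hedged sketch of how the perf core is expected to
 * drive load_mm_cr4().  The real code lives in arch/x86/events/core.c;
 * the helper name refresh_pce and the exact calls below are
 * assumptions, not a quote of that file.  When user code mmaps a perf
 * event with rdpmc enabled, perf bumps the per-mm counter and pokes
 * the other CPUs so each one re-evaluates CR4.PCE for its loaded mm:
 *
 *	atomic_inc(&mm->context.perf_rdpmc_allowed);
 *	on_each_cpu(refresh_pce, NULL, 1);
 *
 * where refresh_pce() ends up calling load_mm_cr4() on the local CPU.
 */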

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif
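
/*
 * A minimal sketch of the allocation pattern the Xen comment above is
 * about, modeled on alloc_ldt_struct() in arch/x86/kernel/ldt.c (error
 * handling elided; details may differ by kernel version).  The entries
 * always come from whole, page-aligned allocations so the pages can be
 * handed to the hypervisor with restricted permissions:
 *
 *	alloc_size = num_entries * LDT_ENTRY_SIZE;
 *	if (alloc_size > PAGE_SIZE)
 *		new_ldt->entries = vzalloc(alloc_size);
 *	else
 *		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);
 */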

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}
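
/*
 * The writer side this pairs with, sketched from install_ldt() in
 * arch/x86/kernel/ldt.c (shown for illustration; consult that file for
 * the authoritative version):
 *
 *	static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 *	{
 *		// Synchronizes with lockless_dereference() above.
 *		smp_store_release(&mm->context.ldt, ldt);
 *
 *		// Activate the new LDT on every CPU running this mm.
 *		on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *	}
 */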

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR holds a
	 * nonzero selector, then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}
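
/*
 * For orientation, switch_ldt() is intended to run at the tail of the
 * context-switch path with interrupts off.  A hedged sketch of the
 * caller in arch/x86/mm/tlb.c (simplified, not a verbatim quote):
 *
 *	void switch_mm_irqs_off(struct mm_struct *prev,
 *				struct mm_struct *next,
 *				struct task_struct *tsk)
 *	{
 *		... update cpu_tlbstate and load the new CR3 ...
 *		load_mm_cr4(next);
 *		switch_ldt(prev, next);
 *	}
 */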

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
}
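
/*
 * enter_lazy_tlb() is what the scheduler calls when switching to a
 * task with no mm (a kernel thread): the previous mm stays loaded in
 * CR3, and this CPU merely notes that it no longer needs TLB flush
 * IPIs for it.  A rough sketch of the generic caller in
 * kernel/sched/core.c's context_switch() (abridged from memory):
 *
 *	if (!next->mm) {			// kernel thread
 *		next->active_mm = prev->active_mm;
 *		mmgrab(prev->active_mm);
 *		enter_lazy_tlb(prev->active_mm, next);
 *	}
 */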

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
	#endif
	/* Don't swallow the error: LDT setup can fail. */
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}
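
/*
 * The pkey_allocation_map initialized above is a plain bitmap of
 * allocated protection keys.  A hedged sketch of how allocation uses
 * it, modeled on mm_pkey_alloc() in arch/x86/include/asm/pkeys.h
 * (simplified; the real helper has additional checks):
 *
 *	ret = ffz(mm->context.pkey_allocation_map);	// first free key
 *	if (ret >= arch_max_pkey())
 *		return -1;				// all keys in use
 *	mm->context.pkey_allocation_map |= (1U << ret);
 *	return ret;
 */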

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
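
/*
 * activate_mm() is used on the exec path rather than on ordinary
 * context switches.  A rough sketch of the caller in fs/exec.c's
 * exec_mmap() (abridged; locking and error handling elided):
 *
 *	active_mm = tsk->active_mm;
 *	tsk->mm = mm;
 *	tsk->active_mm = mm;
 *	activate_mm(active_mm, mm);
 */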

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif
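
/*
 * is_64bit_mm() matters to code whose data-structure layout depends on
 * the tracked task's ABI rather than the kernel's.  MPX is the main
 * user: its bounds directory is sized differently for 32-bit and
 * 64-bit mms.  A hedged sketch (the constant names here are
 * illustrative, not quoted from asm/mpx.h):
 *
 *	bd_size = is_64bit_mm(mm) ? MPX_BD_SIZE_BYTES_64
 *				  : MPX_BD_SIZE_BYTES_32;
 */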

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif
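
/*
 * The inverse mapping, for reference: a 4-bit pkey is packed into the
 * four VM_PKEY_BIT* vm_flags at VM_PKEY_SHIFT when the VMA is set up.
 * Conceptually (a sketch of what arch_calc_vm_prot_bits() and friends
 * achieve, not their literal code):
 *
 *	vm_flags |= ((unsigned long)pkey << VM_PKEY_SHIFT) &
 *		    (VM_PKEY_BIT0 | VM_PKEY_BIT1 |
 *		     VM_PKEY_BIT2 | VM_PKEY_BIT3);
 */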

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
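
/*
 * For reference, PKRU holds two bits per key: an access-disable bit
 * and a write-disable bit.  A hedged sketch of the checks behind
 * __pkru_allows_pkey(), modeled on the helpers in
 * arch/x86/include/asm/pgtable.h (details may differ by version):
 *
 *	int bits = pkey * PKRU_BITS_PER_PKEY;		// 2 bits per key
 *	read_ok  = !(pkru & (PKRU_AD_BIT << bits));
 *	write_ok = !(pkru & ((PKRU_AD_BIT | PKRU_WD_BIT) << bits));
 */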

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
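
/*
 * An illustrative caller, sketched from the VMX vcpu-run path in
 * arch/x86/kvm/vmx.c (field names approximate; shown to motivate the
 * "fast" in the name, since this runs on every VM entry):
 *
 *	cr3 = __get_current_cr3_fast();
 *	if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
 *		vmcs_writel(HOST_CR3, cr3);
 *		vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
 *	}
 */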

#endif /* _ASM_X86_MMU_CONTEXT_H */