/* xref: /openbmc/linux/arch/x86/include/asm/mmu_context.h (revision 8cb5d748) */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
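
/*
 * Editorial sketch, not part of the original header: load_mm_cr4() is the
 * hook that makes user-space RDPMC work only for mms that opted in (perf is
 * assumed to raise mm->context.perf_rdpmc_allowed when a self-monitoring
 * event is mmap()ed).  The mm-switch path would then call it with the
 * incoming mm, roughly:
 *
 *	load_mm_cr4(next_mm);
 *
 * which sets CR4.PCE when RDPMC should be allowed and clears it otherwise.
 */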

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif
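
/*
 * Editorial note, not part of the original header: ldt_struct backs the
 * modify_ldt(2) syscall, which legacy code (DOSEMU, 16-bit Wine, emulators)
 * uses to install per-process segment descriptors.  A minimal, hypothetical
 * user-space sketch of what ends up populating 'entries':
 *
 *	struct user_desc ud = {
 *		.entry_number	= 0,
 *		.base_addr	= 0x100000,
 *		.limit		= 0xffff,
 *		.seg_32bit	= 1,
 *	};
 *	syscall(SYS_modify_ldt, 1, &ud, sizeof(ud));
 *
 * (func 1 writes one entry; see modify_ldt(2) and <asm/ldt.h> for the exact
 * field semantics.)
 */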

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}
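
/*
 * Editorial sketch, not part of the original header: the writer side that
 * the lockless_dereference() above pairs with lives in the modify_ldt()
 * implementation.  Under the scheme described in the comment above, it
 * publishes a fully built ldt_struct with a release store and then
 * interrupts every CPU running this mm before the old table is freed,
 * roughly:
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), reload_ldt_on_cpu, mm, true);
 *	free_old_ldt(old_ldt);
 *
 * (reload_ldt_on_cpu and free_old_ldt are hypothetical names for the IPI
 * callback and the deferred free.)
 */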

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
}
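
/*
 * Editorial note, not part of the original header: clearing this CPU out of
 * mm_cpumask() is what spares lazy-TLB CPUs from remote flush work; flush
 * paths are assumed to target only the CPUs still present in the mask,
 * along the lines of:
 *
 *	on_each_cpu_mask(mm_cpumask(mm), do_flush, info, true);
 *
 * with do_flush standing in for whatever the TLB-flush IPI handler is.
 */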

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
	#endif
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}
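
/*
 * Editorial sketch, not part of the original header: ctx_id gives every mm
 * an identity that is never reused, and tlb_gen counts flush requests made
 * against that mm.  The assumed pattern is that flush code bumps the
 * generation and each CPU records how far it has caught up, roughly:
 *
 *	u64 gen = atomic64_inc_return(&mm->context.tlb_gen);
 *	if (this_cpu_seen_gen < gen)
 *		flush_locally_and_record(gen);
 *
 * (this_cpu_seen_gen and flush_locally_and_record are hypothetical names.)
 */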

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif
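
/*
 * Editorial note (assumption, not stated in this header): a notable consumer
 * of is_64bit_mm() is the MPX code, which needs to know whether
 * bounds-directory and bounds-table entries are 4 or 8 bytes wide, e.g.:
 *
 *	bd_entry_bytes = is_64bit_mm(mm) ? 8 : 4;
 */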

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif
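
/*
 * Worked example (editorial): the protection key is a 4-bit field stored in
 * vm_flags starting at VM_PKEY_SHIFT.  If a VMA has been assigned pkey 5
 * (binary 0101) via pkey_mprotect(2), then VM_PKEY_BIT2 and VM_PKEY_BIT0 are
 * set in vm_flags and
 *
 *	(vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT == 5
 */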

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
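
/*
 * Background sketch (editorial, from the architecture rather than this
 * header): PKRU holds two bits per key, access-disable (AD) in bit 2*pkey
 * and write-disable (WD) in bit 2*pkey + 1, so __pkru_allows_pkey() amounts
 * to roughly:
 *
 *	ad = pkru & (1 << (2 * pkey));
 *	wd = pkru & (1 << (2 * pkey + 1));
 *	allowed = !ad && (!write || !wd);
 */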

/*
 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
 * bits.  This serves two purposes.  It prevents a nasty situation in
 * which PCID-unaware code saves CR3, loads some other value (with PCID
 * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
 * the saved ASID was nonzero.  It also means that any bugs involving
 * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
 * deterministically.
 */

static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		VM_WARN_ON_ONCE(asid > 4094);
		return __sme_pa(mm->pgd) | (asid + 1);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(mm->pgd);
	}
}

static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
{
	VM_WARN_ON_ONCE(asid > 4094);
	return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
}
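
/*
 * Worked example (editorial, ignoring any SME encryption bit that __sme_pa()
 * may add): CR3 carries the page-table base in its upper bits, the 12-bit
 * PCID in bits 11:0, and the "don't flush" hint in bit 63.  For ASID 0 and a
 * hypothetical PGD at physical address 0x12345000:
 *
 *	build_cr3(mm, 0)         == 0x12345000 | 1
 *	build_cr3_noflush(mm, 0) == 0x12345000 | 1 | (1UL << 63)
 *
 * The "asid > 4094" warnings follow from the asid+1 encoding: PCID 0 is
 * never used by ASID-aware code, leaving 4095 encodable values.
 */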

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
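
/*
 * Usage sketch (editorial): a caller such as KVM that temporarily points CR3
 * elsewhere can snapshot the "official" value cheaply and restore it
 * afterwards, roughly:
 *
 *	unsigned long saved_cr3 = __get_current_cr3_fast();
 *	write_cr3(temporary_cr3);
 *	...
 *	write_cr3(saved_cr3);
 *
 * (temporary_cr3 is hypothetical; the VM_WARN_ON above means this must run
 * with preemption disabled and not from NMI context.)
 */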

#endif /* _ASM_X86_MMU_CONTEXT_H */