xref: /openbmc/linux/arch/x86/include/asm/mmu_context.h (revision 590b516e)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

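/*
 * Illustrative sketch of the update side that feeds load_mm_cr4(),
 * modelled loosely on x86_pmu_event_mapped() in arch/x86/events/core.c
 * (a sketch, not a copy; "refresh_pce" stands in for a per-CPU
 * callback that re-runs load_mm_cr4() on the current mm).  When perf
 * first grants an mm user-space RDPMC access, every CPU currently
 * running that mm has to refresh CR4.PCE.
 */
#if 0
static void example_grant_rdpmc(struct mm_struct *mm)
{
	/* First mapped event in this mm: flip CR4.PCE everywhere it runs. */
	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}
#endif
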
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can allocate
	 * and map, and enable a new LDT without invalidating the mapping
	 * of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}

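/*
 * Worked example, assuming the usual LDT_ENTRIES == 8192 and
 * LDT_ENTRY_SIZE == 8 (so LDT_SLOT_STRIDE == 64KB, a multiple of
 * PAGE_SIZE as required above):
 *
 *	ldt_slot_va(0) == (void *)LDT_BASE_ADDR
 *	ldt_slot_va(1) == (void *)(LDT_BASE_ADDR + 0x10000)
 */
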
/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
#else
	clear_LDT();
#endif
}

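/*
 * Illustrative sketch of the write side that the READ_ONCE() above
 * pairs with, modelled on install_ldt() in arch/x86/kernel/ldt.c (a
 * sketch, not a copy; "flush_ldt" stands in for an IPI callback that
 * re-runs load_mm_ldt() on the target CPU).  The release store makes
 * the descriptor contents visible before the pointer, and the IPI
 * guarantees no CPU still uses the old table once it is freed.
 */
#if 0
static void example_install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	/* Publish: entries are written before the pointer becomes visible. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Force every CPU running this mm through load_mm_ldt(). */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
}
#endif
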
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

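/*
 * A sketch of why "|" beats "||" above: "||" must short-circuit, which
 * can cost two tests and two branches, while "|" folds both pointers
 * into one flag-setting OR followed by a single branch.  Roughly (the
 * exact code is, of course, up to the compiler):
 *
 *	mov	prev_ldt, %rax
 *	or	next_ldt, %rax
 *	jne	.Lreload_ldt
 */
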
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
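
/*
 * Illustrative sketch of how the pkey bookkeeping initialized above is
 * consumed, in the style of mm_pkey_alloc() in asm/pkeys.h (a sketch,
 * not a copy): allocation is a find-first-zero scan of the bitmap, and
 * bit 0 is permanently busy because pkey 0 was allocated implicitly.
 */
#if 0
static int example_pkey_alloc(struct mm_struct *mm)
{
	int pkey = ffz(mm->context.pkey_allocation_map);

	if (pkey >= arch_max_pkey())	/* all keys in use */
		return -1;
	mm->context.pkey_allocation_map |= (1U << pkey);
	return pkey;
}
#endif
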
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

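/*
 * activate_mm() is wrapped in do { ... } while (0) with no trailing
 * semicolon so that the caller's own semicolon completes the
 * statement; the macro then expands safely even in unbraced if/else:
 *
 *	if (cond)
 *		activate_mm(prev, next);
 *	else
 *		something_else();
 *
 * A semicolon baked into the macro body would terminate the "if" arm
 * early and make the "else" a syntax error.
 */
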
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

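/*
 * Illustrative sketch of a consumer, in the spirit of the MPX
 * bounds-directory sizing code in arch/x86/mm/mpx.c (a sketch, not a
 * copy; the MPX_BD_SIZE_BYTES_* constants are assumed from asm/mpx.h):
 * the same per-mm question picks between 32-bit and 64-bit layouts.
 */
#if 0
static unsigned long example_bd_size(struct mm_struct *mm)
{
	return is_64bit_mm(mm) ? MPX_BD_SIZE_BYTES_64
			       : MPX_BD_SIZE_BYTES_32;
}
#endif
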
static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

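/*
 * Illustrative sketch of the test __pkru_allows_pkey() performs (the
 * real helper lives in asm/pgtable.h; this is a sketch, not a copy):
 * PKRU holds two bits per key, Access-Disable at bit 2*pkey and
 * Write-Disable at bit 2*pkey + 1.
 */
#if 0
static bool example_pkru_allows(u32 pkru, int pkey, bool write)
{
	bool ad = pkru & (1U << (pkey * 2));		/* access-disable */
	bool wd = pkru & (1U << (pkey * 2 + 1));	/* write-disable */

	return !ad && !(write && wd);
}
#endif
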
/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

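/*
 * For reference, a sketch of what build_cr3() (asm/tlbflush.h) folds
 * into the value checked above when PCID is available: the physical
 * address of the page-table root in the high bits and the kernel PCID
 * derived from the ASID in the low 12 bits, roughly
 *
 *	cr3 = __sme_pa(pgd) | kern_pcid(asid);
 *
 * Without PCID support the ASID must be 0 and only the address part
 * remains.
 */
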
#endif /* _ASM_X86_MMU_CONTEXT_H */