xref: /openbmc/linux/arch/x86/include/asm/mmu_context.h (revision 74c228d2)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate and map a new LDT, then enable it, without invalidating
	 * the mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};
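
/*
 * Editor's illustrative sketch, not kernel code: with PTI, an LDT update
 * conceptually alternates between the two alias slots, along the lines of
 * the helpers in arch/x86/kernel/ldt.c (the helper names below are
 * assumptions for illustration only):
 *
 *	struct ldt_struct *new_ldt = alloc_ldt_struct(nr_entries);
 *	int slot = old_ldt ? !old_ldt->slot : 0;  // flip between 0 and 1
 *	map_ldt_struct(mm, new_ldt, slot);        // alias at ldt_slot_va(slot)
 *	install_ldt(mm, new_ldt);                 // old alias stays mapped
 *	free_ldt_struct(old_ldt);                 // once no CPU still uses it
 */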

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif
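
/*
 * How these hooks are wired up below: a new or copied mm gets
 * init_new_context_ldt() via init_new_context(), fork() reaches
 * ldt_dup_context() via arch_dup_mmap(), and teardown runs
 * destroy_context() -> destroy_context_ldt() plus
 * arch_exit_mmap() -> ldt_arch_exit_mmap().
 */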

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

#ifdef CONFIG_ADDRESS_MASKING
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return mm->context.lam_cr3_mask;
}

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
	mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
	mm->context.untag_mask = oldmm->context.untag_mask;
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
	mm->context.untag_mask = -1UL;
}

#else

static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return 0;
}

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
}
#endif
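
/*
 * Editor's illustrative note: the untag mask is meant to be ANDed into a
 * tagged user address before it is used, roughly
 *
 *	untagged = addr & mm->context.untag_mask;
 *
 * (the real untagging logic also has to preserve the sign/canonical
 * bits).  mm_reset_untag_mask() sets the mask to -1UL, so untagging is a
 * no-op until LAM is actually enabled for the mm.
 */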

#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
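
/*
 * Editor's note: enter_lazy_tlb() is the hook invoked when the kernel
 * switches to a kernel thread that borrows the current mm; the old page
 * tables stay loaded and TLB maintenance is deferred until a real user
 * mm is switched back in (the details live in arch/x86/mm/tlb.c).
 */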

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	mm_reset_untag_mask(mm);
	init_new_context_ldt(mm);
	return 0;
}
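
/*
 * Editor's note on the two counters initialized above: ctx_id is a 64-bit
 * id that is never reused for another mm, and tlb_gen counts
 * TLB-invalidating changes to this mm.  A minimal sketch of how a flush
 * decision can compare them (the real logic lives in arch/x86/mm/tlb.c):
 *
 *	if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) == mm->context.ctx_id &&
 *	    this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
 *	    atomic64_read(&mm->context.tlb_gen))
 *		flush;	// this CPU's view of the mm is stale
 */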

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}
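
/*
 * Editor's illustrative note: pkey_allocation_map is a plain bitmap with
 * one bit per protection key, so an "is this pkey allocated?" test is
 * roughly
 *
 *	bool allocated = mm->context.pkey_allocation_map & (1U << pkey);
 *
 * which is why init_new_context() seeds the map with 0x1: pkey 0 is
 * always implicitly allocated.
 */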

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	dup_lam(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

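/*
 * Editor's note: on a 64-bit kernel an mm is treated as 64-bit unless
 * IA32 emulation is compiled in and the mm has been flagged as 32-bit
 * (MM_CONTEXT_UPROBE_IA32, set when uprobes on this mm assume 32-bit
 * code); a 32-bit kernel never has 64-bit mms.
 */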
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
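
/*
 * Editor's sketch of the PKRU check above: PKRU holds two bits per key,
 * an access-disable bit at 2*pkey and a write-disable bit at 2*pkey + 1,
 * so __pkru_allows_pkey() boils down to roughly
 *
 *	u32 pkru = read_pkru();
 *	bool ad = pkru & (1U << (2 * pkey));
 *	bool wd = pkru & (1U << (2 * pkey + 1));
 *	return !ad && (!write || !wd);
 */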

unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */