1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_MMU_CONTEXT_H
31965aae3SH. Peter Anvin #define _ASM_X86_MMU_CONTEXT_H
4bb898558SAl Viro
5bb898558SAl Viro #include <asm/desc.h>
660063497SArun Sharma #include <linux/atomic.h>
7d17d8f9dSDave Hansen #include <linux/mm_types.h>
87d06d9c9SDave Hansen #include <linux/pkeys.h>
9d17d8f9dSDave Hansen
10d17d8f9dSDave Hansen #include <trace/events/tlb.h>
11d17d8f9dSDave Hansen
12bb898558SAl Viro #include <asm/tlbflush.h>
13bb898558SAl Viro #include <asm/paravirt.h>
14d97080ebSNadav Amit #include <asm/debugreg.h>
15ae53fa18SH. Peter Anvin (Intel) #include <asm/gsseg.h>
16f39681edSAndy Lutomirski
17f39681edSAndy Lutomirski extern atomic64_t last_mm_ctx_id;
18f39681edSAndy Lutomirski
197911d3f7SAndy Lutomirski #ifdef CONFIG_PERF_EVENTS
20405b4537SAnthony Steinhauser DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
21631fe154SDavidlohr Bueso DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
22cb2a0235SThomas Gleixner void cr4_update_pce(void *ignored);
237911d3f7SAndy Lutomirski #endif
247911d3f7SAndy Lutomirski
25a5b9e5a2SAndy Lutomirski #ifdef CONFIG_MODIFY_LDT_SYSCALL
26bb898558SAl Viro /*
2737868fe1SAndy Lutomirski * ldt_structs can be allocated, used, and freed, but they are never
2837868fe1SAndy Lutomirski * modified while live.
2937868fe1SAndy Lutomirski */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions. This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates. On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode. The whole array will be aliased at the address
	 * given by ldt_slot_va(slot). We use two slots so that we can allocate
	 * and map, and enable a new LDT without invalidating the mapping
	 * of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int slot;
};
5137868fe1SAndy Lutomirski
52a5b9e5a2SAndy Lutomirski /*
53a5b9e5a2SAndy Lutomirski * Used for LDT copy/destruction.
54a5b9e5a2SAndy Lutomirski */
init_new_context_ldt(struct mm_struct * mm)55a4828f81SThomas Gleixner static inline void init_new_context_ldt(struct mm_struct *mm)
56a4828f81SThomas Gleixner {
57a4828f81SThomas Gleixner mm->context.ldt = NULL;
58a4828f81SThomas Gleixner init_rwsem(&mm->context.ldt_usr_sem);
59a4828f81SThomas Gleixner }
60a4828f81SThomas Gleixner int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
6139a0526fSDave Hansen void destroy_context_ldt(struct mm_struct *mm);
62f55f0501SAndy Lutomirski void ldt_arch_exit_mmap(struct mm_struct *mm);
63a5b9e5a2SAndy Lutomirski #else /* CONFIG_MODIFY_LDT_SYSCALL */
/* Without the modify_ldt() syscall an mm never has an LDT: all no-ops. */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
72a5b9e5a2SAndy Lutomirski #endif
73a5b9e5a2SAndy Lutomirski
74186525bdSIngo Molnar #ifdef CONFIG_MODIFY_LDT_SYSCALL
75186525bdSIngo Molnar extern void load_mm_ldt(struct mm_struct *mm);
76186525bdSIngo Molnar extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
77186525bdSIngo Molnar #else
/* !CONFIG_MODIFY_LDT_SYSCALL: no mm ever has an LDT worth loading. */
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	/* Callers must hold preemption off across a context switch. */
	DEBUG_LOCKS_WARN_ON(preemptible());
}
86186525bdSIngo Molnar #endif
8737868fe1SAndy Lutomirski
8882721d8bSKirill A. Shutemov #ifdef CONFIG_ADDRESS_MASKING
/* CR3 bits encoding this mm's Linear Address Masking (LAM) mode. */
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return mm->context.lam_cr3_mask;
}
9382721d8bSKirill A. Shutemov
dup_lam(struct mm_struct * oldmm,struct mm_struct * mm)9482721d8bSKirill A. Shutemov static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
9582721d8bSKirill A. Shutemov {
9682721d8bSKirill A. Shutemov mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
9774c228d2SKirill A. Shutemov mm->context.untag_mask = oldmm->context.untag_mask;
9874c228d2SKirill A. Shutemov }
9974c228d2SKirill A. Shutemov
100f7d30434SKirill A. Shutemov #define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	/* Mask ANDed with user pointers to strip LAM tag bits. */
	return mm->context.untag_mask;
}
105f7d30434SKirill A. Shutemov
static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
	/* -1UL == all address bits pass through: no tag bits are stripped. */
	mm->context.untag_mask = -1UL;
}
11082721d8bSKirill A. Shutemov
11123e5d9ecSKirill A. Shutemov #define arch_pgtable_dma_compat arch_pgtable_dma_compat
arch_pgtable_dma_compat(struct mm_struct * mm)11223e5d9ecSKirill A. Shutemov static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
11323e5d9ecSKirill A. Shutemov {
11423e5d9ecSKirill A. Shutemov return !mm_lam_cr3_mask(mm) ||
11523e5d9ecSKirill A. Shutemov test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags);
11623e5d9ecSKirill A. Shutemov }
11782721d8bSKirill A. Shutemov #else
11882721d8bSKirill A. Shutemov
/* !CONFIG_ADDRESS_MASKING: LAM is compiled out, so there is no mask. */
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return 0;
}

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
}
13182721d8bSKirill A. Shutemov #endif
13282721d8bSKirill A. Shutemov
133586c4f24SNicholas Piggin #define enter_lazy_tlb enter_lazy_tlb
134186525bdSIngo Molnar extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
1356826c8ffSBrian Gerst
136a31e184eSDave Hansen /*
137a31e184eSDave Hansen * Init a new mm. Used on mm copies, like at fork()
138a31e184eSDave Hansen * and on mm's that are brand-new, like at execve().
139a31e184eSDave Hansen */
140586c4f24SNicholas Piggin #define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	/* Globally unique mm id, taken from a monotonic counter. */
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	mm_reset_untag_mask(mm);
	init_new_context_ldt(mm);
	return 0;
}
161586c4f24SNicholas Piggin
162586c4f24SNicholas Piggin #define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	/* The LDT is the only x86 context state needing explicit teardown. */
	destroy_context_ldt(mm);
}
16739a0526fSDave Hansen
16869c0319aSAndy Lutomirski extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16969c0319aSAndy Lutomirski struct task_struct *tsk);
1706826c8ffSBrian Gerst
171078194f8SAndy Lutomirski extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
172078194f8SAndy Lutomirski struct task_struct *tsk);
173078194f8SAndy Lutomirski #define switch_mm_irqs_off switch_mm_irqs_off
174bb898558SAl Viro
/*
 * Make @next the active mm, e.g. at exec() time.  Notify the paravirt
 * layer first so hypervisor backends can prepare before the actual
 * switch happens in switch_mm().
 *
 * Fix: no trailing semicolon after "while (0)" -- callers supply their
 * own, which keeps the macro usable in un-braced if/else branches
 * (the stray semicolon previously made such uses a syntax error).
 */
#define activate_mm(prev, next)			\
do {						\
	paravirt_enter_mmap(next);		\
	switch_mm((prev), (next), NULL);	\
} while (0)
180bb898558SAl Viro
#ifdef CONFIG_X86_32
/* 32-bit: only the gs selector needs clearing when the mm goes away. */
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
} while (0)
#else
/*
 * 64-bit: release the task's shadow stack (shstk) and clear the user
 * segment registers that may still reference the dying mm.
 */
#define deactivate_mm(tsk, mm)			\
do {						\
	shstk_free(tsk);			\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
194bb898558SAl Viro
/* Copy protection-key allocation state from @oldmm into @mm at fork. */
static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
#endif
}
207a31e184eSDave Hansen
/* Arch hook for fork(): duplicate x86-specific mm state from @oldmm. */
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_enter_mmap(mm);
	dup_lam(oldmm, mm);
	/* Only the LDT copy can fail; everything above returns void. */
	return ldt_dup_context(oldmm, mm);
}
215a1ea1c03SDave Hansen
/* Arch hook run when an mm's address space is being torn down. */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}
221a1ea1c03SDave Hansen
222b0e9b09bSDave Hansen #ifdef CONFIG_X86_64
is_64bit_mm(struct mm_struct * mm)223b0e9b09bSDave Hansen static inline bool is_64bit_mm(struct mm_struct *mm)
224b0e9b09bSDave Hansen {
22597f2645fSMasahiro Yamada return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
2265ef495e5SKirill A. Shutemov !test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags);
227b0e9b09bSDave Hansen }
228b0e9b09bSDave Hansen #else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	/* A 32-bit kernel never runs 64-bit mms. */
	return false;
}
233b0e9b09bSDave Hansen #endif
234b0e9b09bSDave Hansen
/* x86 needs no work when a user range is unmapped; empty arch hook. */
static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
}
2391de4fa14SDave Hansen
24033a709b2SDave Hansen /*
24133a709b2SDave Hansen * We only want to enforce protection keys on the current process
24233a709b2SDave Hansen * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
24433a709b2SDave Hansen * process we could use.
24533a709b2SDave Hansen *
24633a709b2SDave Hansen * So do not enforce things if the VMA is not from the current
24733a709b2SDave Hansen * mm, or if we are in a kernel thread.
24833a709b2SDave Hansen */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/*
	 * Protection keys only gate data accesses from the current
	 * process: instruction fetches and VMAs belonging to another
	 * mm are always allowed.
	 */
	if (!execute && !foreign && !vma_is_foreign(vma))
		return __pkru_allows_pkey(vma_pkey(vma), write);

	return true;
}
26033a709b2SDave Hansen
2618c5cc19eSThomas Gleixner unsigned long __get_current_cr3_fast(void);
262d6e41f11SAndy Lutomirski
263586c4f24SNicholas Piggin #include <asm-generic/mmu_context.h>
264586c4f24SNicholas Piggin
2651965aae3SH. Peter Anvin #endif /* _ASM_X86_MMU_CONTEXT_H */
266