xref: /openbmc/linux/arch/x86/include/asm/mmu.h (revision fd5e9fccbd504c5179ab57ff695c610bca8809d6)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bits.h>

/* Uprobes on this MM assume 32-bit code */
#define MM_CONTEXT_UPROBE_IA32		0
/* vsyscall page is accessible on this MM */
#define MM_CONTEXT_HAS_VSYSCALL		1
/* Do not allow changing LAM mode */
#define MM_CONTEXT_LOCK_LAM		2
/* Allow LAM and SVA coexisting */
#define MM_CONTEXT_FORCE_TAGGED_SVA	3
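
/*
 * Illustrative sketch, not part of the upstream header: these values are
 * bit numbers, so the expected usage is the standard atomic bitops on the
 * 'flags' word in mm_context_t, e.g.:
 *
 *	set_bit(MM_CONTEXT_HAS_VSYSCALL, &mm->context.flags);
 *	if (test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags))
 *		...
 */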

/*
 * x86 has arch-specific MMU state beyond what lives in mm_struct.
 */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;
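
	/*
	 * Illustrative sketch, not part of this header: ctx_id is typically
	 * handed out from a global, monotonically increasing counter when a
	 * new mm is created, which is what guarantees uniqueness:
	 *
	 *	static atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
	 *	...
	 *	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	 */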

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush.  This lets the low-level
	 * flushing code keep track of what needs flushing.
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;
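
	/*
	 * Illustrative sketch, not part of this header: per the protocol in
	 * the comment above, a flusher bumps tlb_gen only after its page
	 * table changes are in place, roughly:
	 *
	 *	// page table updates for 'mm' are already done here
	 *	atomic64_inc_return(&mm->context.tlb_gen);
	 *	// ... then ask the low-level code to flush; each CPU compares
	 *	// the generation it last flushed to against tlb_gen and can
	 *	// skip work it has already caught up with.
	 */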

	/*
	 * Jiffies timestamp after which mm_cpumask() should next be
	 * trimmed of stale CPUs.
	 */
	unsigned long next_trim_cpumask;
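
	/*
	 * Illustrative sketch, not part of this header: a plausible consumer
	 * checks and re-arms this deadline around TLB flush time, e.g.:
	 *
	 *	if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
	 *		WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
	 *		// trim CPUs that no longer run this mm out of mm_cpumask(mm)
	 *	}
	 */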

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct rw_semaphore	ldt_usr_sem;
	struct ldt_struct	*ldt;
#endif

#ifdef CONFIG_X86_64
	/* MM_CONTEXT_* flag bits (see the #defines above) */
	unsigned long flags;
#endif

#ifdef CONFIG_ADDRESS_MASKING
	/* Active LAM mode:  X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
	unsigned long lam_cr3_mask;

	/* Significant bits of the virtual address. Excludes tag bits. */
	u64 untag_mask;
#endif
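
	/*
	 * Illustrative sketch, not part of this header: with LAM enabled,
	 * lam_cr3_mask is OR-ed into the CR3 value built for this mm, and
	 * untag_mask strips the tag bits off a tagged user pointer before it
	 * is used as a real virtual address, roughly:
	 *
	 *	cr3  = __pa(pgd) | mm->context.lam_cr3_mask;	// assumed CR3 build-up
	 *	addr = tagged_addr & mm->context.untag_mask;	// untag the address
	 */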

	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
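
	/*
	 * Illustrative sketch, not part of this header: the perf code bumps
	 * this counter when user-space RDPMC access is granted for the mm,
	 * and the context-switch path is expected to mirror it into CR4.PCE,
	 * along the lines of (names assumed):
	 *
	 *	if (atomic_read(&next->context.perf_rdpmc_allowed))
	 *		cr4_set_bits_irqsoff(X86_CR4_PCE);
	 *	else
	 *		cr4_clear_bits_irqsoff(X86_CR4_PCE);
	 */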
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not.  Protected by mmap_lock.
	 */
	u16 pkey_allocation_map;
	s16 execute_only_pkey;
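
	/*
	 * Illustrative sketch, not part of this header: allocation checks
	 * are simple bit tests against the map, e.g.:
	 *
	 *	bool allocated = mm->context.pkey_allocation_map & (1U << pkey);
	 *
	 * while execute_only_pkey caches the key reserved for PROT_EXEC-only
	 * mappings (-1 when none has been set up).
	 */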
#endif
} mm_context_t;

#define INIT_MM_CONTEXT(mm)						\
	.context = {							\
		.ctx_id = 1,						\
		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
	}
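
/*
 * Illustrative sketch, not part of this header: INIT_MM_CONTEXT() is meant
 * to be dropped into the static initializer of the kernel's initial mm,
 * roughly:
 *
 *	struct mm_struct init_mm = {
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 *
 * giving init_mm ctx_id 1 (the lowest valid ID) and an initialized lock
 * before any dynamic context setup can run.
 */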

void leave_mm(int cpu);
/*
 * The self-referencing define lets generic code detect that this
 * architecture supplies its own leave_mm() instead of the fallback stub.
 */
#define leave_mm leave_mm

#endif /* _ASM_X86_MMU_H */