/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bits.h>

/* Uprobes on this MM assume 32-bit code */
#define MM_CONTEXT_UPROBE_IA32	BIT(0)
/* vsyscall page is accessible on this MM */
#define MM_CONTEXT_HAS_VSYSCALL	BIT(1)
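/*
 * (These bits are stored in the 64-bit-only "flags" member of
 * mm_context_t below; a typical consumer tests e.g.
 * mm->context.flags & MM_CONTEXT_HAS_VSYSCALL.)
 */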

/*
 * x86 has arch-specific MMU state beyond what lives in mm_struct.
 */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;
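	/*
	 * (Illustrative note: the TLB code in arch/x86/mm/tlb.c records
	 * the ctx_id loaded into each ASID slot and compares it against
	 * this field to tell whether a cached ASID still refers to this
	 * mm.)
	 */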

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush.  This lets the low-level
	 * flushing code keep track of what needs flushing.
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;
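	/*
	 * A minimal sketch of the generation bump, mirroring the
	 * inc_mm_tlb_gen() helper in <asm/tlbflush.h>:
	 *
	 *	static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
	 *	{
	 *		return atomic64_inc_return(&mm->context.tlb_gen);
	 *	}
	 *
	 * The flush code compares the generation each CPU has caught up
	 * to against this value to decide whether that CPU still needs
	 * a flush.
	 */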

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct rw_semaphore	ldt_usr_sem;
	struct ldt_struct	*ldt;
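	/*
	 * (ldt_usr_sem serializes user-visible LDT installs and reads;
	 * see the modify_ldt() paths in arch/x86/kernel/ldt.c.)
	 */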
#endif

#ifdef CONFIG_X86_64
	unsigned short flags;
#endif

	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
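	/*
	 * (perf_rdpmc_allowed counts perf events mmapped with rdpmc
	 * access; CR4.PCE stays set on CPUs running this mm while it is
	 * nonzero.  See x86_pmu_event_mapped() in
	 * arch/x86/events/core.c.)
	 */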
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not.  Protected by mmap_lock.
	 */
	u16 pkey_allocation_map;
	s16 execute_only_pkey;
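	/*
	 * (init_new_context() starts each mm with only pkey 0
	 * allocated, i.e. pkey_allocation_map = 0x1, and with
	 * execute_only_pkey = -1, meaning "none allocated yet".)
	 */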
#endif
} mm_context_t;

#define INIT_MM_CONTEXT(mm)						\
	.context = {							\
		.ctx_id = 1,						\
		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
	}
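/*
 * INIT_MM_CONTEXT() is consumed by the static init_mm definition in
 * mm/init-mm.c; a trimmed sketch of that use:
 *
 *	struct mm_struct init_mm = {
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 *
 * ctx_id 1 is reserved for init_mm; init_new_context() hands out ids
 * from 2 upward via atomic64_inc_return(&last_mm_ctx_id).
 */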

void leave_mm(int cpu);
#define leave_mm leave_mm
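/*
 * (The self-referential define above lets other code test
 * "#ifdef leave_mm" to detect that x86 supplies its own leave_mm(),
 * implemented in arch/x86/mm/tlb.c.)
 */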

#endif /* _ASM_X86_MMU_H */