xref: /openbmc/linux/arch/x86/include/asm/mmu.h (revision b35565bb)
1 #ifndef _ASM_X86_MMU_H
2 #define _ASM_X86_MMU_H
3 
4 #include <linux/spinlock.h>
5 #include <linux/mutex.h>
6 #include <linux/atomic.h>
7 
8 /*
9  * x86 has arch-specific MMU state beyond what lives in mm_struct.
10  */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush.  This lets the low-level
	 * flushing code keep track of what needs flushing.
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Per-mm LDT installed via the modify_ldt() syscall.
	 * NOTE(review): presumably NULL when the mm has never set up a
	 * custom LDT — confirm against the allocator in ldt.c.
	 */
	struct ldt_struct *ldt;
#endif

#ifdef CONFIG_X86_64
	/* True if mm supports a task running in 32 bit compatibility mode. */
	unsigned short ia32_compat;
#endif

	/*
	 * NOTE(review): lock appears to serialize updates to the mutable
	 * context state below (LDT/vdso fields) — confirm against the
	 * users of mm->context.lock; the protected set is not visible
	 * from this header alone.
	 */
	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not.  protected by mmap_sem.
	 */
	u16 pkey_allocation_map;
	/*
	 * NOTE(review): presumably the pkey reserved for execute-only
	 * mappings, with a negative value meaning "none allocated" —
	 * verify against the pkey allocation code.
	 */
	s16 execute_only_pkey;
#endif
#ifdef CONFIG_X86_INTEL_MPX
	/* address of the bounds directory */
	void __user *bd_addr;
#endif
} mm_context_t;
55 
/*
 * Static initializer fragment for init_mm's arch context.  ctx_id is
 * set to 1 because zero is not a valid ctx_id (see the ctx_id comment
 * in mm_context_t); all other fields are zero-initialized.
 */
#define INIT_MM_CONTEXT(mm)						\
	.context = {							\
		.ctx_id = 1,						\
	}
60 
/*
 * leave_mm - detach @cpu from its current (lazy) mm.
 * NOTE(review): declaration only; the semantics above are inferred
 * from the name — confirm against the definition in the x86 mm code.
 */
void leave_mm(int cpu);
62 
63 #endif /* _ASM_X86_MMU_H */
64