/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/atomic.h>

/*
 * x86 has arch-specific MMU state beyond what lives in mm_struct.
 * One mm_context_t is embedded in each mm_struct as its .context.
 */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct. A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush. This lets the low-level
	 * flushing code keep track of what needs flushing.
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Per-mm Local Descriptor Table state; only present when the
	 * modify_ldt() syscall is configured in.
	 */
	struct ldt_struct *ldt;
#endif

#ifdef CONFIG_X86_64
	/* True if mm supports a task running in 32 bit compatibility mode. */
	unsigned short ia32_compat;
#endif

	/*
	 * NOTE(review): presumably serializes updates to the LDT and the
	 * vdso fields below — confirm against the users of this lock.
	 */
	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not. Protected by mmap_sem.
	 */
	u16 pkey_allocation_map;
	s16 execute_only_pkey;
#endif
#ifdef CONFIG_X86_INTEL_MPX
	/* address of the bounds directory (MPX bounds tables) */
	void __user *bd_addr;
#endif
} mm_context_t;

/*
 * Static initializer for an mm's .context. ctx_id 1 is used for the
 * initial mm; zero is never a valid ctx_id (see ctx_id above).
 */
#define INIT_MM_CONTEXT(mm)				\
	.context = {					\
		.ctx_id = 1,				\
	}

/*
 * NOTE(review): defined elsewhere; appears to drop @cpu out of its
 * lazily-active mm's TLB context — confirm against the definition.
 */
void leave_mm(int cpu);

#endif /* _ASM_X86_MMU_H */