xref: /openbmc/linux/arch/x86/include/asm/mmu_context.h (revision 26cfd12b)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif
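
/*
 * A hedged sketch of what the two keys above gate: cr4_update_pce(),
 * defined in arch/x86/events/core.c, toggles CR4.PCE (which permits
 * user-space RDPMC) based on these static branches.  The body below is
 * illustrative only and assumes the per-mm perf_rdpmc_allowed counter:
 *
 *	if (static_branch_unlikely(&rdpmc_always_available_key) ||
 *	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
 *	     atomic_read(&mm->context.perf_rdpmc_allowed)))
 *		cr4_set_bits_irqsoff(X86_CR4_PCE);
 *	else
 *		cr4_clear_bits_irqsoff(X86_CR4_PCE);
 */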

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can allocate
	 * and map, and enable a new LDT without invalidating the mapping
	 * of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};
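
/*
 * A minimal sketch of the two-slot scheme described above.  The real
 * code lives in arch/x86/kernel/ldt.c; the flow below is an illustrative
 * assumption, not part of this header's API:
 *
 *	// Use the slot the old LDT is NOT using, so the old alias
 *	// mapping stays valid while other threads still run on it.
 *	int slot = old_ldt ? !old_ldt->slot : 0;
 *	new_ldt->slot = slot;
 *	// Alias new_ldt->entries at ldt_slot_va(slot) in the user
 *	// (PTI) page tables, then make the new LDT live.
 *	map_ldt_struct(mm, new_ldt, slot);
 */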

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif
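
/*
 * Both hooks above are driven from the context-switch path (the
 * implementations live in arch/x86/kernel/ldt.c): switch_mm_irqs_off()
 * calls switch_ldt(), which reloads the LDT via load_mm_ldt() when
 * either mm has a custom LDT; with PTI enabled, load_mm_ldt() installs
 * the ldt_slot_va() alias rather than the direct entries mapping.
 */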

extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
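
/*
 * pkey_allocation_map above is a plain bitmap: bit N set means pkey N is
 * allocated, so 0x1 reserves only the always-present default key 0.  A
 * hedged sketch of how an allocator can pick a free key from it (the
 * real one is mm_pkey_alloc() in asm/pkeys.h; this is illustrative):
 *
 *	int pkey = ffz(mm->context.pkey_allocation_map);  // lowest clear bit
 *	if (pkey >= arch_max_pkey())
 *		return -1;                                // all keys in use
 *	mm->context.pkey_allocation_map |= (1 << pkey);
 */
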
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
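
/*
 * deactivate_mm() is invoked when a task's mm is released, notably when
 * exec switches away from the old image, giving the arch a chance to
 * reset user segment state: 32-bit kernels only need to clear %gs, while
 * 64-bit kernels clear the GS selector and %fs (and with them the FS/GS
 * bases seen by the new image).
 */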
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
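
/*
 * For reference, PKRU packs two bits per protection key: bit 2*pkey is
 * Access-Disable (AD) and bit 2*pkey+1 is Write-Disable (WD).  A rough,
 * illustrative sketch of the check __pkru_allows_pkey() performs (the
 * real helpers live in asm/pgtable.h):
 *
 *	u32 pkru = read_pkru();
 *	bool readable = !(pkru & (1U << (pkey * 2)));      // AD clear
 *	bool writable = !(pkru & (1U << (pkey * 2 + 1)));  // WD clear
 *	return readable && (!write || writable);
 */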

unsigned long __get_current_cr3_fast(void);

#endif /* _ASM_X86_MMU_CONTEXT_H */