/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_XSTATE_H
#define __X86_KERNEL_FPU_XSTATE_H

#include <asm/cpufeature.h>
#include <asm/fpu/xstate.h>

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u64, xfd_state);
#endif

static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
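
/*
 * Illustrative sketch (an assumption, not code from this file): a caller
 * handing a freshly zeroed buffer to XRSTORS would set the compacted-format
 * marker roughly like this before the first restore:
 *
 *	struct xregs_state *xsave = &fpstate->regs.xsave;
 *
 *	memset(xsave, 0, fpstate->size);
 *	xstate_init_xcomp_bv(xsave, fpstate->xfeatures);
 */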

static inline u64 xstate_get_host_group_perm(void)
{
	/* Pairs with WRITE_ONCE() in xstate_request_perm() */
	return READ_ONCE(current->group_leader->thread.fpu.perm.__state_perm);
}
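
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * permission mask gates dynamically enabled features, so a check for
 * e.g. AMX tile data could look like:
 *
 *	if (!(xstate_get_host_group_perm() & XFEATURE_MASK_XTILE_DATA))
 *		return -EPERM;
 */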

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
				      u32 pkru_val, enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
				    enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf);
extern int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate, const void __user *ubuf);

extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system_xstate(unsigned int legacy_size);

extern void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);

/* Mask of supervisor xfeatures supported by the kernel */
static inline u64 xfeatures_mask_supervisor(void)
{
	return fpu_kernel_cfg.max_features & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

/*
 * Independent features are managed outside the regular task save/restore.
 * LBR state is only included when architectural LBRs are available.
 */
static inline u64 xfeatures_mask_independent(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;

	return XFEATURE_MASK_INDEPENDENT;
}
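
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * independent mask is what a caller managing e.g. architectural LBR state
 * outside the regular task context would consult before issuing an
 * XSAVES/XRSTORS-based save or restore:
 *
 *	u64 lbr_mask = xfeatures_mask_independent() & XFEATURE_MASK_LBR;
 *
 *	if (lbr_mask) {
 *		// save/restore LBR state via XSAVES/XRSTORS with lbr_mask
 *	}
 */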

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
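
/*
 * How the hand-assembled opcodes above decode: each instruction is an
 * opcode extension of 0F AE or 0F C7, so the final byte is a ModRM byte
 * with mod=00, the /n extension in the reg field and rm=111, i.e.
 * (%edi)/(%rdi). For example XSAVE is 0F AE /4, giving ModRM
 * 00 100 111b = 0x27; XRSTORS is 0F C7 /3, giving 00 011 111b = 0x1f.
 * The optional REX.W prefix (0x48) selects the 64-bit forms on x86_64.
 */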

/*
 * After this, @err contains 0 on success or the trap number when the
 * operation raises an exception.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
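
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * component bitmap is always passed as a split 64-bit mask in EDX:EAX,
 * matching the "a"/"d" constraints above, while "D" pins the buffer to
 * (%rdi) as the hand-assembled opcodes require:
 *
 *	u64 mask = XFEATURE_MASK_FPSSE;
 *	u32 lmask = mask, hmask = mask >> 32;
 *	int err;
 *
 *	XSTATE_OP(XSAVE, &fpstate->regs.xsave, lmask, hmask, err);
 *	if (err)
 *		// handle the trap number reported in err
 */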

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports the
 * compacted format and supervisor states in addition to the modified
 * optimization of XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because XSAVEOPT
 * supports the modified optimization which XSAVE does not.
 *
 * XSAVE is the fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. It is used here as the address
 * of the instruction at which an exception might be raised.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compacted XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
extern void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor);
#else
static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { }
#endif

#ifdef CONFIG_X86_64
/*
 * Update MSR_IA32_XFD to match @fpstate's XFD value. The per-CPU shadow
 * avoids the WRMSR when the MSR already holds the desired value.
 */
static inline void xfd_update_state(struct fpstate *fpstate)
{
	if (fpu_state_size_dynamic()) {
		u64 xfd = fpstate->xfd;

		if (__this_cpu_read(xfd_state) != xfd) {
			wrmsrl(MSR_IA32_XFD, xfd);
			__this_cpu_write(xfd_state, xfd);
		}
	}
}
#else
static inline void xfd_update_state(struct fpstate *fpstate) { }
#endif

/*
 * Save processor xstate to xsave area.
 *
 * Uses XSAVE, XSAVEOPT or XSAVES depending on the CPU features and
 * command line options. The choice is permanent until the next reboot.
 */
static inline void os_xsave(struct fpstate *fpstate)
{
	u64 mask = fpstate->xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);
	xfd_validate_state(fpstate, mask, false);

	XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
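
/*
 * Illustrative sketch (an assumption, not code from this file): a context
 * switch saves the outgoing task's register state with os_xsave() before
 * the incoming task's state is eventually restored, roughly:
 *
 *	os_xsave(prev->thread.fpu.fpstate);
 *	...
 *	os_xrstor(next->thread.fpu.fpstate, XFEATURE_MASK_FPSTATE);
 */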

/*
 * Restore processor xstate from xsave area.
 *
 * Uses XRSTORS when XSAVES is used, XRSTOR otherwise.
 */
static inline void os_xrstor(struct fpstate *fpstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	xfd_validate_state(fpstate, mask, true);
	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/* Restore of supervisor state. Does not require XFD */
static inline void os_xrstor_supervisor(struct fpstate *fpstate)
{
	u64 mask = xfeatures_mask_supervisor();
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because the processor might be
 * tracking XRSTOR state from a different context, which would let
 * unmodified components be skipped and leave stale data in the user buffer.
 *
 * We don't use the compacted xsave area format, for backward compatibility
 * with old applications which don't understand it.
 *
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
	 * internally, e.g. PKRU. That's user space ABI and also required
	 * to allow the signal handler to modify PKRU.
	 */
	struct fpstate *fpstate = current->thread.fpu.fpstate;
	u64 mask = fpstate->user_xfeatures;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(fpstate, mask, false);

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}
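
/*
 * Illustrative sketch (an assumption, not code from this file): a signal
 * setup path zeroes the header and then saves directly to the user stack
 * frame, treating any nonzero return as a fault:
 *
 *	if (__clear_user(&buf->header, sizeof(buf->header)))
 *		return -EFAULT;
 *	if (xsave_to_user_sigframe(buf))
 *		return -EFAULT;
 */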

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	xfd_validate_state(current->thread.fpu.fpstate, mask, true);

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code instead
 * of an exception.
 */
static inline int os_xrstor_safe(struct fpstate *fpstate, u64 mask)
{
	struct xregs_state *xstate = &fpstate->regs.xsave;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/* Ensure that XFD is up to date */
	xfd_update_state(fpstate);

	if (cpu_feature_enabled(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

#endif /* __X86_KERNEL_FPU_XSTATE_H */