xref: /openbmc/linux/arch/x86/kernel/fpu/legacy.h (revision f68f2ff9)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_LEGACY_H
#define __X86_KERNEL_FPU_LEGACY_H

#include <asm/fpu/types.h>

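/* Mask of the MXCSR bits which the CPU actually supports, set up at FPU init time. */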
extern unsigned int mxcsr_feature_mask;

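/*
 * Load a new value into the MXCSR register. Bits outside mxcsr_feature_mask
 * are reserved and setting them raises #GP, so callers must sanitize the
 * value first.
 */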
static inline void ldmxcsr(u32 mxcsr)
{
	asm volatile("ldmxcsr %0" :: "m" (mxcsr));
}

/*
 * user_insn() executes an FPU instruction which may fault on user memory.
 * Returns 0 on success or the trap number when the operation raises an
 * exception.
 */
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
									\
	might_fault();							\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1: " #insn "\n"					\
		     "2: " ASM_CLAC "\n"				\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)	\
		     : [err] "=a" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

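/*
 * Variant of user_insn() for kernel buffers: no user access, so no
 * STAC/CLAC or might_fault() is required. Returns 0 on success or -EFAULT
 * when the instruction faults.
 */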
#define kernel_insn_err(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[err]) \
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

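/*
 * Restore variant which does not report failure to the caller. A faulting
 * restore is handled via the EX_TYPE_FPU_RESTORE fixup instead.
 */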
#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FPU_RESTORE)	\
		     : output : input)

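/*
 * Save the legacy x87 state directly to the user space sigframe. FNSAVE
 * also reinitializes the FPU after storing the state.
 */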
static inline int fnsave_to_user_sigframe(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

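/* Save the FXSAVE image directly to the user space sigframe. */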
static inline int fxsave_to_user_sigframe(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}

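/* Restore the FXSAVE image from a kernel buffer. */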
static inline void fxrstor(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

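/*
 * Same as fxrstor(), but reports a faulting restore to the caller:
 * returns 0 on success or -EFAULT.
 */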
static inline int fxrstor_safe(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

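/*
 * Restore the FXSAVE image from the user space sigframe. Returns 0 on
 * success or the trap number when the restore raises an exception.
 */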
static inline int fxrstor_from_user_sigframe(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

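/* Restore the legacy x87 state from a kernel buffer. */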
static inline void frstor(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

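/* Same as frstor(), but returns 0 on success or -EFAULT when FRSTOR faults. */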
static inline int frstor_safe(struct fregs_state *fx)
{
	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

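/*
 * Restore the legacy x87 state from the user space sigframe. Returns 0 on
 * success or the trap number when the restore raises an exception.
 */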
static inline int frstor_from_user_sigframe(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

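/* Save the FXSAVE image to a kernel buffer. */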
static inline void fxsave(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile("fxsave %[fx]" : [fx] "=m" (*fx));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}

#endif /* __X86_KERNEL_FPU_LEGACY_H */