xref: /openbmc/linux/arch/loongarch/include/asm/fpu.h (revision 97e6f135)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_FPU_H
#define _ASM_FPU_H

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>

#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/current.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

struct sigcontext;

extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
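
/*
 * kernel_fpu_begin()/kernel_fpu_end() bracket FPU use in kernel code,
 * which cannot assume live user FP state. A minimal caller sketch
 * (illustrative only, not part of this header):
 *
 *	kernel_fpu_begin();
 *	... FP/SIMD instructions are safe to execute here ...
 *	kernel_fpu_end();
 *
 * Preemption is typically held off between the two calls, so the
 * bracketed region must not sleep.
 */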

extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);

extern void _save_lsx(struct loongarch_fpu *fpu);
extern void _restore_lsx(struct loongarch_fpu *fpu);
extern void _init_lsx_upper(void);
extern void _restore_lsx_upper(struct loongarch_fpu *fpu);

extern void _save_lasx(struct loongarch_fpu *fpu);
extern void _restore_lasx(struct loongarch_fpu *fpu);
extern void _init_lasx_upper(void);
extern void _restore_lasx_upper(struct loongarch_fpu *fpu);

static inline void enable_lsx(void);
static inline void disable_lsx(void);
static inline void save_lsx(struct task_struct *t);
static inline void restore_lsx(struct task_struct *t);

static inline void enable_lasx(void);
static inline void disable_lasx(void);
static inline void save_lasx(struct task_struct *t);
static inline void restore_lasx(struct task_struct *t);

/*
 * Mask the FCSR Cause bits according to the Enable bits, observing
 * that Unimplemented is always enabled.
 */
static inline unsigned long mask_fcsr_x(unsigned long fcsr)
{
	return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
			(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}
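
/*
 * Worked example (a sketch, assuming the usual LoongArch FCSR layout
 * with the Enable bits of FPU_CSR_ALL_E at bits 4:0 and the Cause
 * bits of FPU_CSR_ALL_X at bits 28:24):
 *
 *	shift = ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) = 25 - 1 = 24
 *
 * Each Enable bit is shifted up to sit under its corresponding Cause
 * bit, so only Cause bits whose exception is enabled survive the
 * mask. E.g. with only the inexact Enable set, mask_fcsr_x() clears
 * every Cause bit except the inexact one.
 */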

static inline int is_fp_enabled(void)
{
	return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
		1 : 0;
}

static inline int is_lsx_enabled(void)
{
	if (!cpu_has_lsx)
		return 0;

	return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LSXEN) ?
		1 : 0;
}

static inline int is_lasx_enabled(void)
{
	if (!cpu_has_lasx)
		return 0;

	return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LASXEN) ?
		1 : 0;
}

static inline int is_simd_enabled(void)
{
	return is_lsx_enabled() | is_lasx_enabled();
}

#define enable_fpu()		set_csr_euen(CSR_EUEN_FPEN)

#define disable_fpu()		clear_csr_euen(CSR_EUEN_FPEN)

#define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)

static inline int is_fpu_owner(void)
{
	return test_thread_flag(TIF_USEDFPU);
}

static inline void __own_fpu(void)
{
	enable_fpu();
	set_thread_flag(TIF_USEDFPU);
	KSTK_EUEN(current) |= CSR_EUEN_FPEN;
}

static inline void own_fpu_inatomic(int restore)
{
	if (cpu_has_fpu && !is_fpu_owner()) {
		__own_fpu();
		if (restore)
			_restore_fp(&current->thread.fpu);
	}
}

static inline void own_fpu(int restore)
{
	preempt_disable();
	own_fpu_inatomic(restore);
	preempt_enable();
}

static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	if (is_fpu_owner()) {
		if (!is_simd_enabled()) {
			if (save)
				_save_fp(&tsk->thread.fpu);
			disable_fpu();
		} else {
			/*
			 * Save the widest enabled unit: the LASX
			 * registers contain the LSX and FP state, and
			 * the LSX registers contain the FP state.
			 */
			if (save) {
				if (!is_lasx_enabled())
					save_lsx(tsk);
				else
					save_lasx(tsk);
			}
			disable_fpu();
			disable_lsx();
			disable_lasx();
			clear_tsk_thread_flag(tsk, TIF_USEDSIMD);
		}
		clear_tsk_thread_flag(tsk, TIF_USEDFPU);
	}
	KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
}

static inline void lose_fpu(int save)
{
	preempt_disable();
	lose_fpu_inatomic(save, current);
	preempt_enable();
}
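
/*
 * Typical pairing (a sketch, not a definitive recipe): a process
 * context takes the hardware with own_fpu(1), which restores the
 * saved context, and gives it up with lose_fpu(1), which writes the
 * live registers back before anything else may use them:
 *
 *	own_fpu(1);		restore current->thread.fpu, take ownership
 *	... use FP registers ...
 *	lose_fpu(1);		save live state back, disable FP/SIMD
 *
 * The _inatomic variants are for callers that have already disabled
 * preemption.
 */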

static inline void init_fpu(void)
{
	unsigned int fcsr = current->thread.fpu.fcsr;

	__own_fpu();
	_init_fpu(fcsr);
	set_used_math();
}

static inline void save_fp(struct task_struct *tsk)
{
	if (cpu_has_fpu)
		_save_fp(&tsk->thread.fpu);
}

static inline void restore_fp(struct task_struct *tsk)
{
	if (cpu_has_fpu)
		_restore_fp(&tsk->thread.fpu);
}

static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
	if (tsk == current) {
		preempt_disable();
		if (is_fpu_owner())
			_save_fp(&current->thread.fpu);
		preempt_enable();
	}

	return tsk->thread.fpu.fpr;
}
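
/*
 * get_fpu_regs() flushes current's live registers to the thread
 * structure first, so a reader such as a ptrace or coredump path sees
 * up-to-date values rather than a stale in-memory copy. A minimal
 * caller sketch (illustrative only):
 *
 *	union fpureg *fpr = get_fpu_regs(task);
 *	... read fpr[0] .. fpr[31] ...
 */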

static inline int is_simd_owner(void)
{
	return test_thread_flag(TIF_USEDSIMD);
}

#ifdef CONFIG_CPU_HAS_LSX

static inline void enable_lsx(void)
{
	if (cpu_has_lsx)
		csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN);
}

static inline void disable_lsx(void)
{
	if (cpu_has_lsx)
		csr_xchg32(0, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN);
}

static inline void save_lsx(struct task_struct *t)
{
	if (cpu_has_lsx)
		_save_lsx(&t->thread.fpu);
}

static inline void restore_lsx(struct task_struct *t)
{
	if (cpu_has_lsx)
		_restore_lsx(&t->thread.fpu);
}

static inline void init_lsx_upper(void)
{
	/*
	 * Check cpu_has_lsx only if it is a compile-time constant. This
	 * lets the compiler optimise out the call for CPUs without LSX,
	 * while avoiding a redundant runtime check on CPUs with LSX.
	 */
	if (__builtin_constant_p(cpu_has_lsx) && !cpu_has_lsx)
		return;

	_init_lsx_upper();
}
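
/*
 * The __builtin_constant_p() check above is a compile-time dispatch
 * (a sketch of the intent, not a guarantee about any compiler's
 * output):
 *
 *	cpu_has_lsx == 0, known at build time  -> whole call folds away
 *	cpu_has_lsx is a runtime probe         -> no extra branch is
 *						  emitted; _init_lsx_upper()
 *						  is called directly
 */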

static inline void restore_lsx_upper(struct task_struct *t)
{
	if (cpu_has_lsx)
		_restore_lsx_upper(&t->thread.fpu);
}

#else
static inline void enable_lsx(void) {}
static inline void disable_lsx(void) {}
static inline void save_lsx(struct task_struct *t) {}
static inline void restore_lsx(struct task_struct *t) {}
static inline void init_lsx_upper(void) {}
static inline void restore_lsx_upper(struct task_struct *t) {}
#endif

#ifdef CONFIG_CPU_HAS_LASX

static inline void enable_lasx(void)
{
	if (cpu_has_lasx)
		csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN);
}

static inline void disable_lasx(void)
{
	if (cpu_has_lasx)
		csr_xchg32(0, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN);
}

static inline void save_lasx(struct task_struct *t)
{
	if (cpu_has_lasx)
		_save_lasx(&t->thread.fpu);
}

static inline void restore_lasx(struct task_struct *t)
{
	if (cpu_has_lasx)
		_restore_lasx(&t->thread.fpu);
}

static inline void init_lasx_upper(void)
{
	if (cpu_has_lasx)
		_init_lasx_upper();
}

static inline void restore_lasx_upper(struct task_struct *t)
{
	if (cpu_has_lasx)
		_restore_lasx_upper(&t->thread.fpu);
}

#else
static inline void enable_lasx(void) {}
static inline void disable_lasx(void) {}
static inline void save_lasx(struct task_struct *t) {}
static inline void restore_lasx(struct task_struct *t) {}
static inline void init_lasx_upper(void) {}
static inline void restore_lasx_upper(struct task_struct *t) {}
#endif

static inline int thread_lsx_context_live(void)
{
	if (__builtin_constant_p(cpu_has_lsx) && !cpu_has_lsx)
		return 0;

	return test_thread_flag(TIF_LSX_CTX_LIVE);
}

static inline int thread_lasx_context_live(void)
{
	if (__builtin_constant_p(cpu_has_lasx) && !cpu_has_lasx)
		return 0;

	return test_thread_flag(TIF_LASX_CTX_LIVE);
}

#endif /* _ASM_FPU_H */