/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_FPU_SCHED_H
#define _ASM_X86_FPU_SCHED_H

#include <linux/sched.h>

#include <asm/cpufeature.h>
#include <asm/fpu/types.h>

#include <asm/trace/fpu.h>

extern void save_fpregs_to_fpstate(struct fpu *fpu);
extern void fpu__drop(struct fpu *fpu);
extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
		     unsigned long shstk_addr);
extern void fpu_flush_thread(void);

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers. The registers must be loaded
 * before returning to userland or before their content is used
 * otherwise.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (cpu_feature_enabled(X86_FEATURE_FPU) &&
	    !(current->flags & (PF_KTHREAD | PF_USER_WORKER))) {
		save_fpregs_to_fpstate(old_fpu);
		/*
		 * The save operation preserved register state, so the
		 * fpu_fpregs_owner_ctx is still @old_fpu. Store the
		 * current CPU number in @old_fpu, so the next return
		 * to user space can avoid the FPU register restore
		 * when it returns on the same CPU and still owns the
		 * context.
		 */
		old_fpu->last_cpu = cpu;

		trace_x86_fpu_regs_deactivated(old_fpu);
	}
}

/*
 * Delay loading of the complete FPU state until the return to userland.
 * PKRU is handled separately.
 */
static inline void switch_fpu_finish(void)
{
	if (cpu_feature_enabled(X86_FEATURE_FPU))
		set_thread_flag(TIF_NEED_FPU_LOAD);
}

#endif /* _ASM_X86_FPU_SCHED_H */
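
/*
 * Illustrative sketch, not part of the header above: how a context-switch
 * path is expected to pair the two stages. In the kernel the real caller
 * is the arch __switch_to() path; example_switch_to() and everything in
 * its body other than the two switch_fpu_*() calls is hypothetical and
 * heavily simplified. It assumes the per-task FPU state lives at
 * task_struct::thread.fpu and that <linux/smp.h> is available for
 * smp_processor_id().
 */
static inline void example_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
	int cpu = smp_processor_id();

	/*
	 * Stage 1: still executing in the context of @prev. Save @prev's
	 * live FPU registers into its fpstate and record the CPU, so a
	 * later return to user space on the same CPU can skip the restore.
	 */
	switch_fpu_prepare(&prev->thread.fpu, cpu);

	/* ... switch stacks, segments and the rest of the task state ... */

	/*
	 * Stage 2: now acting on behalf of @next. Only set TIF_NEED_FPU_LOAD;
	 * the actual register restore is deferred until the next return to
	 * user space, or until the kernel itself needs the FPU.
	 */
	switch_fpu_finish();
}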