// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#include <asm/ptrace.h>

/*
 * Lazily set up @tsk's FPU state.
 *
 * If the task has already used math, the only remaining work is to spill
 * the live hardware registers into the save area -- and only when the CPU
 * actually has an FPU and @tsk is the task currently running on it.
 *
 * Otherwise this is the task's first FPU use: allocate the xstate save
 * area from the slab cache (GFP_KERNEL, so this may sleep -- callers must
 * allow that) and initialize either the hardware or the software FPU
 * image, depending on what the boot CPU reports.
 *
 * Returns 0 on success, -ENOMEM if the xstate allocation fails.
 */
int init_fpu(struct task_struct *tsk)
{
	if (tsk_used_math(tsk)) {
		/*
		 * State already exists; if it is live in this CPU's
		 * registers, flush it to the memory save area first.
		 */
		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
			unlazy_fpu(tsk, task_pt_regs(tsk));
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	if (!tsk->thread.xstate) {
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			return -ENOMEM;
	}

	if (boot_cpu_data.flags & CPU_HAS_FPU) {
		/* Hardware FPU: zero the image and set the initial FPSCR. */
		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	} else {
		/* Software-emulated FPU: same initialization, soft image. */
		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	}

	/* Mark the task as having valid math state from now on. */
	set_stopped_child_used_math(tsk);
	return 0;
}

#ifdef CONFIG_SH_FPU
/*
 * Reload @current's saved FPU state into the hardware registers and mark
 * the FPU as live for this task (TS_USEDFPU).  The fpu_counter increment
 * tracks repeated FPU use; presumably consumed by the context-switch
 * lazy-FPU preload heuristic -- NOTE(review): confirm against the
 * arch switch_to implementation.
 */
void __fpu_state_restore(void)
{
	struct task_struct *tsk = current;

	restore_fpu(tsk);

	task_thread_info(tsk)->status |= TS_USEDFPU;
	tsk->thread.fpu_counter++;
}

/*
 * Handle an FPU-disabled fault: make the FPU usable for the faulting
 * user task.
 *
 * FPU use from kernel mode is a bug on this architecture and is reported
 * via BUG().
 *
 * On the very first FPU use the xstate save area must be allocated.
 * init_fpu() can sleep in the slab allocator, so interrupts are enabled
 * around the call (the irq_enable/irq_disable pair shows we arrive here
 * with IRQs off) and disabled again afterwards.  If the allocation fails
 * the task is killed with SIGKILL -- it cannot make progress without
 * FPU state.
 */
void fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (unlikely(!user_mode(regs))) {
		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
		BUG();
		return;
	}

	if (!tsk_used_math(tsk)) {
		int ret;
		/*
		 * does a slab alloc which can sleep
		 */
		local_irq_enable();
		ret = init_fpu(tsk);
		local_irq_disable();
		if (ret) {
			/*
			 * ran out of memory!
			 */
			force_sig(SIGKILL);
			return;
		}
	}

	/* Give this task ownership of the FPU, then reload its state. */
	grab_fpu(regs);

	__fpu_state_restore();
}

/*
 * Trap entry point for the FPU-disabled exception: TRAP_HANDLER_DECL
 * brings the trap's pt_regs into scope, then we defer to
 * fpu_state_restore().
 */
BUILD_TRAP_HANDLER(fpu_state_restore)
{
	TRAP_HANDLER_DECL;

	fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */