xref: /openbmc/linux/arch/x86/kernel/fpu/regset.c (revision 1c813ce0)
// SPDX-License-Identifier: GPL-2.0
/*
 * FPU registers' regset abstraction, for ptrace, core dumps, etc.
 */
#include <linux/sched/task_stack.h>
#include <linux/vmalloc.h>

#include <asm/fpu/api.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>

#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"

/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset is updated based on the
 * feature capabilities supported by XSAVE.
 */
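/*
 * For the regset core, ->active() returns the number of regset->size sized
 * slots worth dumping; a return value of 0 (no FXSR below) marks the regset
 * as not present for this task, e.g. no corresponding core dump note.
 */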
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return regset->n;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	if (boot_cpu_has(X86_FEATURE_FXSR))
		return regset->n;
	else
		return 0;
}

/*
 * The regset get() functions are invoked from:
 *
 *   - coredump to dump the current task's fpstate. If the current task
 *     owns the FPU then the memory state has to be synchronized and the
 *     FPU register state preserved. Otherwise fpstate is already in sync.
 *
 *   - ptrace to dump fpstate of a stopped task, in which case the registers
 *     have already been saved to fpstate on context switch.
 */
static void sync_fpstate(struct fpu *fpu)
{
	if (fpu == &current->thread.fpu)
		fpu_sync_fpstate(fpu);
}
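
/*
 * Illustrative sketch of the user-space side (not used by this file): the
 * get()/set() handlers below are reached through the regset core, typically
 * via ptrace, e.g.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov);
 *
 * which ends up in xstateregs_get() for the stopped target task.
 */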

/*
 * Invalidate cached FPU registers before modifying the stopped target
 * task's fpstate.
 *
 * This forces the target task on resume to restore the FPU registers from
 * the modified fpstate. Otherwise the task might skip the restore and
 * operate with the cached FPU registers, which would discard the
 * modifications.
 */
static void fpu_force_restore(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	__fpu_invalidate_fpregs_state(fpu);
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		struct membuf to)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_feature_enabled(X86_FEATURE_FXSR))
		return -ENODEV;

	sync_fpstate(fpu);

	if (!use_xsave()) {
		return membuf_write(&to, &fpu->fpstate->regs.fxsave,
				    sizeof(fpu->fpstate->regs.fxsave));
	}

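	/*
	 * XSAVE is in use: convert the kernel's (possibly init-optimized)
	 * xstate image into the legacy FXSAVE format expected by this regset.
	 */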
	copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_FX);
	return 0;
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct fxregs_state newstate;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_FXSR))
		return -ENODEV;

	/* No funny business with partial or oversized writes is permitted. */
	if (pos != 0 || count != sizeof(newstate))
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	/* Do not allow an invalid MXCSR value. */
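	/* (Loading reserved MXCSR bits would raise #GP on FXRSTOR/XRSTOR.) */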
	if (newstate.mxcsr & ~mxcsr_feature_mask)
		return -EINVAL;

	fpu_force_restore(fpu);

	/* Copy the state */
	memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));

	/* Clear xmm8..15 for 32-bit callers */
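	/*
	 * xmm_space[] is an array of u32; each XMM register occupies four
	 * entries, so XMM8 starts at index 8 * 4 and XMM8..XMM15 span
	 * 8 * 16 bytes.
	 */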
	BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
	if (in_ia32_syscall())
		memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);

	/* Mark FP and SSE as in use when XSAVE is enabled */
	if (use_xsave())
		fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;

	return 0;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		struct membuf to)
{
	if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
		return -ENODEV;

	sync_fpstate(&target->thread.fpu);

	copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE);
	return 0;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xregs_state *tmpbuf = NULL;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
		return -ENODEV;

	/*
	 * A whole standard-format XSAVE buffer is needed:
	 */
	if (pos != 0 || count != fpu_user_cfg.max_size)
		return -EFAULT;

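	/*
	 * For a user-space source buffer, bounce the data through a temporary
	 * kernel buffer first; the full uabi image can be large, hence
	 * vmalloc() rather than an on-stack or kmalloc() buffer.
	 */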
	if (!kbuf) {
		tmpbuf = vmalloc(count);
		if (!tmpbuf)
			return -ENOMEM;

		if (copy_from_user(tmpbuf, ubuf, count)) {
			ret = -EFAULT;
			goto out;
		}
	}

	fpu_force_restore(fpu);
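	/*
	 * The kernel keeps the task's PKRU value in thread_struct::pkru, so
	 * the copy helper is handed a pointer to update it as well.
	 */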
	ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf, &target->thread.pkru);

out:
	vfree(tmpbuf);
	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

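/*
 * The i387 tag word uses two bits per register (00 valid, 01 zero,
 * 10 special, 11 empty), while the FXSAVE image keeps only an abridged
 * tag with one bit per register (1 = non-empty, 0 = empty).
 * twd_i387_to_fxsr() collapses each two-bit tag into that single bit.
 */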
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

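/*
 * Rebuild the full two-bit i387 tag word from the abridged FXSAVE tag:
 * the abridged form only records empty vs. non-empty, so the class of
 * each non-empty register (valid/zero/special) has to be re-derived
 * from its saved exponent and significand.
 */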
static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
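			/*
			 * Bit i of the abridged tag refers to physical
			 * register i, while st_space[] holds the registers
			 * in stack order, hence the (i - tos) & 7 remapping.
			 */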
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

static void __convert_from_fxsr(struct user_i387_ia32_struct *env,
				struct task_struct *tsk,
				struct fxregs_state *fxsave)
{
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be the ds/cs values at FPU exception time,
	 * but that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
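	/*
	 * In the legacy 32-bit FSAVE/FSTENV layout, the upper half of the
	 * fcs dword carries the last FPU opcode, so fold fop back in here.
	 */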
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	__convert_from_fxsr(env, tsk, &tsk->thread.fpu.fpstate->regs.fxsave);
}

void convert_to_fxsr(struct fxregs_state *fxsave,
		     const struct user_i387_ia32_struct *env)
{
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
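	/* The last opcode travels in the upper half of the legacy fcs field. */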
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       struct membuf to)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	struct fxregs_state fxsave, *fx;

	sync_fpstate(fpu);

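	/* Without a hardware FPU, report the math-emulation soft state instead. */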
	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, to);

	if (!cpu_feature_enabled(X86_FEATURE_FXSR)) {
		return membuf_write(&to, &fpu->fpstate->regs.fsave,
				    sizeof(struct fregs_state));
	}

	if (use_xsave()) {
		struct membuf mb = { .p = &fxsave, .left = sizeof(fxsave) };

		/* Handle init state optimized xstate correctly */
		copy_xstate_to_uabi_buf(mb, target, XSTATE_COPY_FP);
		fx = &fxsave;
	} else {
		fx = &fpu->fpstate->regs.fxsave;
	}

	__convert_from_fxsr(&env, target, fx);
	return membuf_write(&to, &env, sizeof(env));
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	/* No funny business with partial or oversized writes is permitted. */
	if (pos != 0 || count != sizeof(struct user_i387_ia32_struct))
		return -EINVAL;

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (ret)
		return ret;

	fpu_force_restore(fpu);

	if (cpu_feature_enabled(X86_FEATURE_FXSR))
		convert_to_fxsr(&fpu->fpstate->regs.fxsave, &env);
	else
		memcpy(&fpu->fpstate->regs.fsave, &env, sizeof(env));

	/*
	 * Update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
		fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FP;

	return 0;
}

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */