xref: /openbmc/linux/arch/x86/kernel/fpu/regset.c (revision dbb60ac7)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
20c306bcfSIngo Molnar /*
30c306bcfSIngo Molnar  * FPU register's regset abstraction, for ptrace, core dumps, etc.
40c306bcfSIngo Molnar  */
543be46e8SThomas Gleixner #include <linux/sched/task_stack.h>
643be46e8SThomas Gleixner #include <linux/vmalloc.h>
743be46e8SThomas Gleixner 
80c306bcfSIngo Molnar #include <asm/fpu/internal.h>
90c306bcfSIngo Molnar #include <asm/fpu/signal.h>
100c306bcfSIngo Molnar #include <asm/fpu/regset.h>
1191c3dba7SYu-cheng Yu #include <asm/fpu/xstate.h>
120c306bcfSIngo Molnar 
/*
 * The regset_fpregs_active() routine simply reports regset->n, as the
 * "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
/*
 * Regset ->active() callback for the FP register set: report the regset
 * member count unconditionally, since fpstate exists for every task.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return regset->n;
}
220c306bcfSIngo Molnar 
230c306bcfSIngo Molnar int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
240c306bcfSIngo Molnar {
252722146eSSebastian Andrzej Siewior 	if (boot_cpu_has(X86_FEATURE_FXSR))
2601f8fd73SBorislav Petkov 		return regset->n;
2701f8fd73SBorislav Petkov 	else
2801f8fd73SBorislav Petkov 		return 0;
290c306bcfSIngo Molnar }
300c306bcfSIngo Molnar 
315a32fac8SThomas Gleixner /*
325a32fac8SThomas Gleixner  * The regset get() functions are invoked from:
335a32fac8SThomas Gleixner  *
345a32fac8SThomas Gleixner  *   - coredump to dump the current task's fpstate. If the current task
355a32fac8SThomas Gleixner  *     owns the FPU then the memory state has to be synchronized and the
365a32fac8SThomas Gleixner  *     FPU register state preserved. Otherwise fpstate is already in sync.
375a32fac8SThomas Gleixner  *
385a32fac8SThomas Gleixner  *   - ptrace to dump fpstate of a stopped task, in which case the registers
395a32fac8SThomas Gleixner  *     have already been saved to fpstate on context switch.
405a32fac8SThomas Gleixner  */
415a32fac8SThomas Gleixner static void sync_fpstate(struct fpu *fpu)
425a32fac8SThomas Gleixner {
435a32fac8SThomas Gleixner 	if (fpu == &current->thread.fpu)
445a32fac8SThomas Gleixner 		fpu__save(fpu);
455a32fac8SThomas Gleixner }
465a32fac8SThomas Gleixner 
47*dbb60ac7SThomas Gleixner /*
48*dbb60ac7SThomas Gleixner  * Invalidate cached FPU registers before modifying the stopped target
49*dbb60ac7SThomas Gleixner  * task's fpstate.
50*dbb60ac7SThomas Gleixner  *
51*dbb60ac7SThomas Gleixner  * This forces the target task on resume to restore the FPU registers from
52*dbb60ac7SThomas Gleixner  * modified fpstate. Otherwise the task might skip the restore and operate
53*dbb60ac7SThomas Gleixner  * with the cached FPU registers which discards the modifications.
54*dbb60ac7SThomas Gleixner  */
static void fpu_force_restore(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	/*
	 * Drop the cached-registers association so the target reloads
	 * the (modified) fpstate on its next return to user space.
	 */
	__fpu_invalidate_fpregs_state(fpu);
}
65*dbb60ac7SThomas Gleixner 
660c306bcfSIngo Molnar int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
670557d64dSAl Viro 		struct membuf to)
680c306bcfSIngo Molnar {
690c306bcfSIngo Molnar 	struct fpu *fpu = &target->thread.fpu;
700c306bcfSIngo Molnar 
71adc997b3SThomas Gleixner 	if (!cpu_feature_enabled(X86_FEATURE_FXSR))
720c306bcfSIngo Molnar 		return -ENODEV;
730c306bcfSIngo Molnar 
745a32fac8SThomas Gleixner 	sync_fpstate(fpu);
750c306bcfSIngo Molnar 
76adc997b3SThomas Gleixner 	if (!use_xsave()) {
77adc997b3SThomas Gleixner 		return membuf_write(&to, &fpu->state.fxsave,
78adc997b3SThomas Gleixner 				    sizeof(fpu->state.fxsave));
79adc997b3SThomas Gleixner 	}
80adc997b3SThomas Gleixner 
81adc997b3SThomas Gleixner 	copy_xstate_to_uabi_buf(to, &fpu->state.xsave, XSTATE_COPY_FX);
82adc997b3SThomas Gleixner 	return 0;
830c306bcfSIngo Molnar }
840c306bcfSIngo Molnar 
/*
 * Regset ->set() handler: overwrite the target's FX state with a
 * complete user-supplied image. All validation happens before
 * fpu_force_restore() so a rejected write leaves fpstate untouched.
 */
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user32_fxsr_struct newstate;
	int ret;

	/* The UABI struct must match the hardware fxsave layout exactly */
	BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));

	if (!cpu_feature_enabled(X86_FEATURE_FXSR))
		return -ENODEV;

	/* No funny business with partial or oversized writes is permitted. */
	if (pos != 0 || count != sizeof(newstate))
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	/* Do not allow an invalid MXCSR value. */
	if (newstate.mxcsr & ~mxcsr_feature_mask)
		return -EINVAL;

	fpu_force_restore(fpu);

	/* Copy the state  */
	memcpy(&fpu->state.fxsave, &newstate, sizeof(newstate));

	/* Clear xmm8..15 (presumably not part of the 32-bit UABI view) */
	BUILD_BUG_ON(sizeof(fpu->state.fxsave.xmm_space) != 16 * 16);
	memset(&fpu->state.fxsave.xmm_space[8], 0, 8 * 16);

	/* Mark FP and SSE as in use when XSAVE is enabled */
	if (use_xsave())
		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;

	return 0;
}
1250c306bcfSIngo Molnar 
/*
 * Regset ->get() handler: copy the target's full xstate, in standard
 * XSAVE format, into the caller supplied membuf. Requires XSAVE.
 */
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		struct membuf to)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
		return -ENODEV;

	sync_fpstate(fpu);

	copy_xstate_to_uabi_buf(to, &fpu->state.xsave, XSTATE_COPY_XSAVE);
	return 0;
}
1390c306bcfSIngo Molnar 
/*
 * Regset ->set() handler: overwrite the target's xstate from a complete
 * standard-format XSAVE buffer. A user-space source is staged through a
 * kernel bounce buffer so copy_kernel_to_xstate() only ever reads
 * kernel memory.
 */
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xregs_state *tmpbuf = NULL;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
		return -ENODEV;

	/*
	 * A whole standard-format XSAVE buffer is needed:
	 */
	if (pos != 0 || count != fpu_user_xstate_size)
		return -EFAULT;

	if (!kbuf) {
		/* vmalloc: the user xstate buffer can be large */
		tmpbuf = vmalloc(count);
		if (!tmpbuf)
			return -ENOMEM;

		if (copy_from_user(tmpbuf, ubuf, count)) {
			ret = -EFAULT;
			goto out;
		}
	}

	fpu_force_restore(fpu);
	ret = copy_kernel_to_xstate(&fpu->state.xsave, kbuf ?: tmpbuf);

out:
	/* vfree(NULL) is a no-op for the kbuf path */
	vfree(tmpbuf);
	return ret;
}
1750c306bcfSIngo Molnar 
1760c306bcfSIngo Molnar #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1770c306bcfSIngo Molnar 
1780c306bcfSIngo Molnar /*
1790c306bcfSIngo Molnar  * FPU tag word conversions.
1800c306bcfSIngo Molnar  */
1810c306bcfSIngo Molnar 
/*
 * Convert the full 16-bit i387 tag word (two bits per register) to the
 * simplified FXSR tag format (one bit per register: 1 = in use, 0 = empty).
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int simplified = 0;	/* int-width to avoid 16 bit prefixes */
	unsigned int reg;

	/*
	 * A register is "in use" unless its two-bit i387 tag is 0b11
	 * (empty); valid (00), zero (01) and special (10) all map to 1.
	 */
	for (reg = 0; reg < 8; reg++) {
		if (((twd >> (2 * reg)) & 0x3) != 0x3)
			simplified |= 1u << reg;
	}

	return simplified;
}
1960c306bcfSIngo Molnar 
/* Address of the n'th 16-byte FX register save slot within an fxregs area */
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
/* Two-bit i387 tag word encodings: */
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

/*
 * Reconstruct the full 16-bit i387 tag word from the simplified FXSR
 * tag byte by classifying each in-use register's contents (valid, zero
 * or special) from its exponent and significand.
 */
static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;	/* top-of-stack from the status word */
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			/* FX slots are TOS-relative; map back to physical index */
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				/* all-ones exponent: NaN or infinity */
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					/* zero exponent, nonzero significand: denormal */
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				/* explicit integer bit set => normal number */
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}
2430c306bcfSIngo Molnar 
2440c306bcfSIngo Molnar /*
2450c306bcfSIngo Molnar  * FXSR floating point environment conversions.
2460c306bcfSIngo Molnar  */
2470c306bcfSIngo Molnar 
/*
 * Convert an FXSR-format save area into the legacy 32-bit
 * user_i387_ia32_struct layout. The control/status words are widened
 * to 32 bits with the upper half set, matching the historical i387
 * memory image.
 */
static void __convert_from_fxsr(struct user_i387_ia32_struct *env,
				struct task_struct *tsk,
				struct fxregs_state *fxsave)
{
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	/* Expand the simplified FXSR tag byte back into the full tag word */
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		/* Only the current task's live DS segment can be read directly */
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	/* Repack the eight st() registers, sizeof(to[0]) bytes each */
	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
2840c306bcfSIngo Molnar 
/*
 * Convert @tsk's own fxsave area to the legacy 32-bit i387 user layout.
 */
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	__convert_from_fxsr(env, tsk, &tsk->thread.fpu.state.fxsave);
}
2903f7f7563SThomas Gleixner 
/*
 * Convert the legacy 32-bit i387 user layout back into an FXSR-format
 * save area (the inverse of __convert_from_fxsr()).
 */
void convert_to_fxsr(struct fxregs_state *fxsave,
		     const struct user_i387_ia32_struct *env)

{
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	/* Compress the full tag word into the simplified FXSR tag byte */
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	/* The legacy layout packs fop into the upper half of env->fcs */
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	/* Repack the eight st() registers, sizeof(from[0]) bytes each */
	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
3170c306bcfSIngo Molnar 
/*
 * Regset ->get() handler: copy the target's FP state in the legacy
 * 32-bit i387 format. Falls back to the math emulator when there is no
 * FPU, and dumps the raw fsave area when FXSR is unavailable.
 */
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       struct membuf to)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	struct fxregs_state fxsave, *fx;

	sync_fpstate(fpu);

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, to);

	if (!cpu_feature_enabled(X86_FEATURE_FXSR)) {
		return membuf_write(&to, &fpu->state.fsave,
				    sizeof(struct fregs_state));
	}

	if (use_xsave()) {
		/* On-stack membuf so copy_xstate_to_uabi_buf() fills fxsave */
		struct membuf mb = { .p = &fxsave, .left = sizeof(fxsave) };

		/* Handle init state optimized xstate correctly */
		copy_xstate_to_uabi_buf(mb, &fpu->state.xsave, XSTATE_COPY_FP);
		fx = &fxsave;
	} else {
		fx = &fpu->state.fxsave;
	}

	__convert_from_fxsr(&env, target, fx);
	return membuf_write(&to, &env, sizeof(env));
}
3480c306bcfSIngo Molnar 
/*
 * Regset ->set() handler: overwrite the target's FP state from a full
 * legacy 32-bit i387 image. All validation and copy-in happens before
 * fpu_force_restore() so a rejected write leaves fpstate untouched.
 */
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	/* No funny business with partial or oversized writes is permitted. */
	if (pos != 0 || count != sizeof(struct user_i387_ia32_struct))
		return -EINVAL;

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (ret)
		return ret;

	fpu_force_restore(fpu);

	/* With FXSR the legacy image must be converted; otherwise store as-is */
	if (cpu_feature_enabled(X86_FEATURE_FXSR))
		convert_to_fxsr(&fpu->state.fxsave, &env);
	else
		memcpy(&fpu->state.fsave, &env, sizeof(env));

	/*
	 * Update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;

	return 0;
}
3840c306bcfSIngo Molnar 
3850c306bcfSIngo Molnar #endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
386