xref: /openbmc/linux/arch/x86/kernel/fpu/xstate.c (revision e368cd72)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * xsave/xrstor support.
4  *
5  * Author: Suresh Siddha <suresh.b.siddha@intel.com>
6  */
7 #include <linux/compat.h>
8 #include <linux/cpu.h>
9 #include <linux/mman.h>
10 #include <linux/pkeys.h>
11 #include <linux/seq_file.h>
12 #include <linux/proc_fs.h>
13 
14 #include <asm/fpu/api.h>
15 #include <asm/fpu/internal.h>
16 #include <asm/fpu/signal.h>
17 #include <asm/fpu/regset.h>
18 #include <asm/fpu/xstate.h>
19 
20 #include <asm/tlbflush.h>
21 #include <asm/cpufeature.h>
22 
23 /*
24  * Although we spell it out in here, the Processor Trace
25  * xfeature is completely unused.  We use other mechanisms
26  * to save/restore PT state in Linux.
27  */
28 static const char *xfeature_names[] =
29 {
30 	"x87 floating point registers"	,
31 	"SSE registers"			,
32 	"AVX registers"			,
33 	"MPX bounds registers"		,
34 	"MPX CSR"			,
35 	"AVX-512 opmask"		,
36 	"AVX-512 Hi256"			,
37 	"AVX-512 ZMM_Hi256"		,
38 	"Processor Trace (unused)"	,
39 	"Protection Keys User registers",
40 	"PASID state",
41 	"unknown xstate feature"	,
42 };
43 
44 static short xsave_cpuid_features[] __initdata = {
45 	X86_FEATURE_FPU,
46 	X86_FEATURE_XMM,
47 	X86_FEATURE_AVX,
48 	X86_FEATURE_MPX,
49 	X86_FEATURE_MPX,
50 	X86_FEATURE_AVX512F,
51 	X86_FEATURE_AVX512F,
52 	X86_FEATURE_AVX512F,
53 	X86_FEATURE_INTEL_PT,
54 	X86_FEATURE_PKU,
55 	X86_FEATURE_ENQCMD,
56 };
57 
58 /*
59  * This represents the full set of bits that should ever be set in a kernel
60  * XSAVE buffer, both supervisor and user xstates.
61  */
62 u64 xfeatures_mask_all __ro_after_init;
63 EXPORT_SYMBOL_GPL(xfeatures_mask_all);
64 
65 static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
66 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
67 static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
68 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
69 static unsigned int xstate_comp_offsets[XFEATURE_MAX] __ro_after_init =
70 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
71 static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] __ro_after_init =
72 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
73 
74 /*
75  * The XSAVE area of the kernel can be in standard or compacted format;
76  * it is always in standard format for user mode. This is the user
77  * mode standard format size used for signal and ptrace frames.
78  */
79 unsigned int fpu_user_xstate_size __ro_after_init;
80 
81 /*
82  * Return whether the system supports a given xfeature.
83  *
84  * Also return the name of the (most advanced) feature that the caller requested:
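 * For example, asking for XFEATURE_MASK_SSE | XFEATURE_MASK_YMM returns 1
 * only when both SSE and AVX state are enabled; on failure, *feature_name
 * points at the most advanced missing feature.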
85  */
86 int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
87 {
88 	u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask_all;
89 
90 	if (unlikely(feature_name)) {
91 		long xfeature_idx, max_idx;
92 		u64 xfeatures_print;
93 		/*
94 		 * We use FLS here to be able to print the most advanced
95 		 * feature that was requested but is missing. So if a driver
96 		 * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the
97 		 * missing AVX feature - this is the most informative message
98 		 * to users:
99 		 */
100 		if (xfeatures_missing)
101 			xfeatures_print = xfeatures_missing;
102 		else
103 			xfeatures_print = xfeatures_needed;
104 
105 		xfeature_idx = fls64(xfeatures_print)-1;
106 		max_idx = ARRAY_SIZE(xfeature_names)-1;
107 		xfeature_idx = min(xfeature_idx, max_idx);
108 
109 		*feature_name = xfeature_names[xfeature_idx];
110 	}
111 
112 	if (xfeatures_missing)
113 		return 0;
114 
115 	return 1;
116 }
117 EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
118 
119 static bool xfeature_is_supervisor(int xfeature_nr)
120 {
121 	/*
122 	 * Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
123 	 * return ECX[0] set to (1) for a supervisor state, and cleared (0)
124 	 * for a user state.
125 	 */
126 	u32 eax, ebx, ecx, edx;
127 
128 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
129 	return ecx & 1;
130 }
131 
132 /*
133  * Enable the extended processor state save/restore feature.
134  * Called once per CPU onlining.
135  */
136 void fpu__init_cpu_xstate(void)
137 {
138 	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask_all)
139 		return;
140 
141 	cr4_set_bits(X86_CR4_OSXSAVE);
142 
143 	/*
144 	 * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
145 	 * managed by XSAVE{C, OPT, S} and XRSTOR{S}.  Only XSAVE user
146 	 * states can be set here.
147 	 */
148 	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_uabi());
149 
150 	/*
151 	 * MSR_IA32_XSS sets supervisor states managed by XSAVES.
152 	 */
153 	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
154 		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
155 				     xfeatures_mask_independent());
156 	}
157 }
158 
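/* Check whether the given xfeature is enabled in xfeatures_mask_all. */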
159 static bool xfeature_enabled(enum xfeature xfeature)
160 {
161 	return xfeatures_mask_all & BIT_ULL(xfeature);
162 }
163 
164 /*
165  * Record the offsets and sizes of various xstates contained
166  * in the XSAVE state memory layout.
167  */
168 static void __init setup_xstate_features(void)
169 {
170 	u32 eax, ebx, ecx, edx, i;
171 	/* start at the beginning of the "extended state" */
172 	unsigned int last_good_offset = offsetof(struct xregs_state,
173 						 extended_state_area);
174 	/*
175 	 * The FP xstates and SSE xstates are legacy states. They are always
176 	 * at fixed offsets in the xsave area in either compacted form
177 	 * or standard form.
178 	 */
179 	xstate_offsets[XFEATURE_FP]	= 0;
180 	xstate_sizes[XFEATURE_FP]	= offsetof(struct fxregs_state,
181 						   xmm_space);
182 
183 	xstate_offsets[XFEATURE_SSE]	= xstate_sizes[XFEATURE_FP];
184 	xstate_sizes[XFEATURE_SSE]	= sizeof_field(struct fxregs_state,
185 						       xmm_space);
186 
187 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
188 		if (!xfeature_enabled(i))
189 			continue;
190 
191 		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
192 
193 		xstate_sizes[i] = eax;
194 
195 		/*
196 		 * If an xfeature is a supervisor state, the offset in EBX is
197 		 * invalid; leave it at -1.
198 		 */
199 		if (xfeature_is_supervisor(i))
200 			continue;
201 
202 		xstate_offsets[i] = ebx;
203 
204 		/*
205 		 * In our xstate size checks, we assume that the highest-numbered
206 		 * xstate feature has the highest offset in the buffer.  Ensure
207 		 * it does.
208 		 */
209 		WARN_ONCE(last_good_offset > xstate_offsets[i],
210 			  "x86/fpu: misordered xstate at %d\n", last_good_offset);
211 
212 		last_good_offset = xstate_offsets[i];
213 	}
214 }
215 
216 static void __init print_xstate_feature(u64 xstate_mask)
217 {
218 	const char *feature_name;
219 
220 	if (cpu_has_xfeatures(xstate_mask, &feature_name))
221 		pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
222 }
223 
224 /*
225  * Print out all the supported xstate features:
226  */
227 static void __init print_xstate_features(void)
228 {
229 	print_xstate_feature(XFEATURE_MASK_FP);
230 	print_xstate_feature(XFEATURE_MASK_SSE);
231 	print_xstate_feature(XFEATURE_MASK_YMM);
232 	print_xstate_feature(XFEATURE_MASK_BNDREGS);
233 	print_xstate_feature(XFEATURE_MASK_BNDCSR);
234 	print_xstate_feature(XFEATURE_MASK_OPMASK);
235 	print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
236 	print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
237 	print_xstate_feature(XFEATURE_MASK_PKRU);
238 	print_xstate_feature(XFEATURE_MASK_PASID);
239 }
240 
241 /*
242  * This check is important because it is easy to get XSTATE_*
243  * confused with XSTATE_BIT_*.
244  */
245 #define CHECK_XFEATURE(nr) do {		\
246 	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
247 	WARN_ON(nr >= XFEATURE_MAX);	\
248 } while (0)
249 
250 /*
251  * We could cache this like xstate_sizes[], but we only use
252  * it here, so it would be a waste of space.
253  */
254 static int xfeature_is_aligned(int xfeature_nr)
255 {
256 	u32 eax, ebx, ecx, edx;
257 
258 	CHECK_XFEATURE(xfeature_nr);
259 
260 	if (!xfeature_enabled(xfeature_nr)) {
261 		WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
262 			  xfeature_nr);
263 		return 0;
264 	}
265 
266 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
267 	/*
268 	 * The value returned by ECX[1] indicates the alignment
269 	 * of state component 'i' when the compacted format
270 	 * of the extended region of an XSAVE area is used:
271 	 */
272 	return !!(ecx & 2);
273 }
274 
275 /*
276  * This function sets up the offsets and sizes of all extended states in
277  * the xsave area. It supports both the standard format and the compacted
278  * format of the xsave area.
279  */
280 static void __init setup_xstate_comp_offsets(void)
281 {
282 	unsigned int next_offset;
283 	int i;
284 
285 	/*
286 	 * The FP xstates and SSE xstates are legacy states. They are always
287 	 * at fixed offsets in the xsave area in either compacted form
288 	 * or standard form.
289 	 */
290 	xstate_comp_offsets[XFEATURE_FP] = 0;
291 	xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
292 						     xmm_space);
293 
294 	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
295 		for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
296 			if (xfeature_enabled(i))
297 				xstate_comp_offsets[i] = xstate_offsets[i];
298 		}
299 		return;
300 	}
301 
302 	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
303 
304 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
305 		if (!xfeature_enabled(i))
306 			continue;
307 
308 		if (xfeature_is_aligned(i))
309 			next_offset = ALIGN(next_offset, 64);
310 
311 		xstate_comp_offsets[i] = next_offset;
312 		next_offset += xstate_sizes[i];
313 	}
314 }
315 
316 /*
317  * Setup offsets of a supervisor-state-only XSAVES buffer:
318  *
319  * The offsets stored in xstate_comp_offsets[] only work for one specific
320  * value of the Requested Feature BitMap (RFBM).  In cases where a different
321  * RFBM value is used, a different set of offsets is required.  This set of
322  * offsets is for when RFBM=xfeatures_mask_supervisor().
323  */
324 static void __init setup_supervisor_only_offsets(void)
325 {
326 	unsigned int next_offset;
327 	int i;
328 
329 	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
330 
331 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
332 		if (!xfeature_enabled(i) || !xfeature_is_supervisor(i))
333 			continue;
334 
335 		if (xfeature_is_aligned(i))
336 			next_offset = ALIGN(next_offset, 64);
337 
338 		xstate_supervisor_only_offsets[i] = next_offset;
339 		next_offset += xstate_sizes[i];
340 	}
341 }
342 
343 /*
344  * Print out xstate component offsets and sizes
345  */
346 static void __init print_xstate_offset_size(void)
347 {
348 	int i;
349 
350 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
351 		if (!xfeature_enabled(i))
352 			continue;
353 		pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
354 			 i, xstate_comp_offsets[i], i, xstate_sizes[i]);
355 	}
356 }
357 
358 /*
359  * All supported features have either init state all zeros or are
360  * handled in setup_init_fpu_buf() individually. This is an explicit
361  * feature list and deliberately does not use XFEATURE_MASK*SUPPORTED,
362  * so that newly added supported features are caught at build time and
363  * people actually look at the init state for the new feature.
364  */
365 #define XFEATURES_INIT_FPSTATE_HANDLED		\
366 	(XFEATURE_MASK_FP |			\
367 	 XFEATURE_MASK_SSE |			\
368 	 XFEATURE_MASK_YMM |			\
369 	 XFEATURE_MASK_OPMASK |			\
370 	 XFEATURE_MASK_ZMM_Hi256 |		\
371 	 XFEATURE_MASK_Hi16_ZMM	 |		\
372 	 XFEATURE_MASK_PKRU |			\
373 	 XFEATURE_MASK_BNDREGS |		\
374 	 XFEATURE_MASK_BNDCSR |			\
375 	 XFEATURE_MASK_PASID)
376 
377 /*
378  * setup the xstate image representing the init state
379  */
380 static void __init setup_init_fpu_buf(void)
381 {
382 	static int on_boot_cpu __initdata = 1;
383 
384 	BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
385 		      XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
386 		     XFEATURES_INIT_FPSTATE_HANDLED);
387 
388 	WARN_ON_FPU(!on_boot_cpu);
389 	on_boot_cpu = 0;
390 
391 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
392 		return;
393 
394 	setup_xstate_features();
395 	print_xstate_features();
396 
397 	if (boot_cpu_has(X86_FEATURE_XSAVES))
398 		init_fpstate.xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
399 						     xfeatures_mask_all;
400 
401 	/*
402 	 * Init all the features state with header.xfeatures being 0x0
403 	 */
404 	os_xrstor_booting(&init_fpstate.xsave);
405 
406 	/*
407 	 * All components are now in init state. Read the state back so
408 	 * that init_fpstate contains all non-zero init state. This only
409 	 * works with XSAVE, but not with XSAVEOPT and XSAVES because
410 	 * those use the init optimization which skips writing data for
411 	 * components in init state.
412 	 *
413 	 * XSAVE could be used, but that would require reshuffling the
414 	 * data when XSAVES is available because XSAVES uses xstate
415 	 * compaction. But doing so is a pointless exercise because most
416 	 * components have an all zeros init state except for the legacy
417 	 * ones (FP and SSE). Those can be saved with FXSAVE into the
418 	 * legacy area. Adding new features requires ensuring that the init
419 	 * state is all zeros or, if not, adding the necessary handling
420 	 * here.
421 	 */
422 	fxsave(&init_fpstate.fxsave);
423 }
424 
425 static int xfeature_uncompacted_offset(int xfeature_nr)
426 {
427 	u32 eax, ebx, ecx, edx;
428 
429 	/*
430 	 * Only XSAVES supports supervisor states and it uses compacted
431 	 * format. Checking a supervisor state's uncompacted offset is
432 	 * an error.
433 	 */
434 	if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
435 		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
436 		return -1;
437 	}
438 
439 	CHECK_XFEATURE(xfeature_nr);
440 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
441 	return ebx;
442 }
443 
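/*
 * Return the size in bytes of xstate component 'xfeature_nr', as
 * enumerated by CPUID leaf 0xD, sub-leaf 'xfeature_nr' (EAX).
 */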
444 int xfeature_size(int xfeature_nr)
445 {
446 	u32 eax, ebx, ecx, edx;
447 
448 	CHECK_XFEATURE(xfeature_nr);
449 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
450 	return eax;
451 }
452 
453 /* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
454 static int validate_user_xstate_header(const struct xstate_header *hdr)
455 {
456 	/* No unknown or supervisor features may be set */
457 	if (hdr->xfeatures & ~xfeatures_mask_uabi())
458 		return -EINVAL;
459 
460 	/* Userspace must use the uncompacted format */
461 	if (hdr->xcomp_bv)
462 		return -EINVAL;
463 
464 	/*
465 	 * If 'reserved' is shrunk to add a new field, make sure to validate
466 	 * that new field here!
467 	 */
468 	BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
469 
470 	/* No reserved bits may be set */
471 	if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
472 		return -EINVAL;
473 
474 	return 0;
475 }
476 
477 static void __xstate_dump_leaves(void)
478 {
479 	int i;
480 	u32 eax, ebx, ecx, edx;
481 	static int should_dump = 1;
482 
483 	if (!should_dump)
484 		return;
485 	should_dump = 0;
486 	/*
487 	 * Dump out a few leaves past the ones that we support
488 	 * just in case there are some goodies up there
489 	 */
490 	for (i = 0; i < XFEATURE_MAX + 10; i++) {
491 		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
492 		pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
493 			XSTATE_CPUID, i, eax, ebx, ecx, edx);
494 	}
495 }
496 
497 #define XSTATE_WARN_ON(x) do {							\
498 	if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {	\
499 		__xstate_dump_leaves();						\
500 	}									\
501 } while (0)
502 
503 #define XCHECK_SZ(sz, nr, nr_macro, __struct) do {			\
504 	if ((nr == nr_macro) &&						\
505 	    WARN_ONCE(sz != sizeof(__struct),				\
506 		"%s: struct is %zu bytes, cpu state %d bytes\n",	\
507 		__stringify(nr_macro), sizeof(__struct), sz)) {		\
508 		__xstate_dump_leaves();					\
509 	}								\
510 } while (0)
511 
512 /*
513  * We have a C struct for each 'xstate'.  We need to ensure
514  * that our software representation matches what the CPU
515  * tells us about the state's size.
516  */
517 static void check_xstate_against_struct(int nr)
518 {
519 	/*
520 	 * Ask the CPU for the size of the state.
521 	 */
522 	int sz = xfeature_size(nr);
523 	/*
524 	 * Match each CPU state with the corresponding software
525 	 * structure.
526 	 */
527 	XCHECK_SZ(sz, nr, XFEATURE_YMM,       struct ymmh_struct);
528 	XCHECK_SZ(sz, nr, XFEATURE_BNDREGS,   struct mpx_bndreg_state);
529 	XCHECK_SZ(sz, nr, XFEATURE_BNDCSR,    struct mpx_bndcsr_state);
530 	XCHECK_SZ(sz, nr, XFEATURE_OPMASK,    struct avx_512_opmask_state);
531 	XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
532 	XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM,  struct avx_512_hi16_state);
533 	XCHECK_SZ(sz, nr, XFEATURE_PKRU,      struct pkru_state);
534 	XCHECK_SZ(sz, nr, XFEATURE_PASID,     struct ia32_pasid_state);
535 
536 	/*
537 	 * Make *SURE* to add any feature numbers in below if
538 	 * there are "holes" in the xsave state component
539 	 * numbers.
540 	 */
541 	if ((nr < XFEATURE_YMM) ||
542 	    (nr >= XFEATURE_MAX) ||
543 	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
544 	    ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_LBR))) {
545 		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
546 		XSTATE_WARN_ON(1);
547 	}
548 }
549 
550 /*
551  * This essentially double-checks what the cpu told us about
552  * how large the XSAVE buffer needs to be.  We are recalculating
553  * it to be safe.
554  *
555  * Independent XSAVE features allocate their own buffers and are not
556  * covered by these checks. Only the size of the buffer for task->fpu
557  * is checked here.
558  */
559 static void do_extra_xstate_size_checks(void)
560 {
561 	int paranoid_xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
562 	int i;
563 
564 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
565 		if (!xfeature_enabled(i))
566 			continue;
567 
568 		check_xstate_against_struct(i);
569 		/*
570 		 * Supervisor state components can be managed only by
571 		 * XSAVES.
572 		 */
573 		if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
574 			XSTATE_WARN_ON(xfeature_is_supervisor(i));
575 
576 		/* Align from the end of the previous feature */
577 		if (xfeature_is_aligned(i))
578 			paranoid_xstate_size = ALIGN(paranoid_xstate_size, 64);
579 		/*
580 		 * The offset of a given state in the non-compacted
581 		 * format is given to us in a CPUID leaf.  We check
582 		 * them for being ordered (increasing offsets) in
583 		 * setup_xstate_features(). XSAVES uses compacted format.
584 		 */
585 		if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
586 			paranoid_xstate_size = xfeature_uncompacted_offset(i);
587 		/*
588 		 * The compacted-format offset always depends on where
589 		 * the previous state ended.
590 		 */
591 		paranoid_xstate_size += xfeature_size(i);
592 	}
593 	XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
594 }
595 
596 
597 /*
598  * Get total size of enabled xstates in XCR0 | IA32_XSS.
599  *
600  * Note the SDM's wording here.  "sub-function 0" only enumerates
601  * the size of the *user* states.  If we use it to size a buffer
602  * that we use 'XSAVES' on, we could potentially overflow the
603  * buffer because 'XSAVES' saves system states too.
604  */
605 static unsigned int __init get_xsaves_size(void)
606 {
607 	unsigned int eax, ebx, ecx, edx;
608 	/*
609 	 * - CPUID function 0DH, sub-function 1:
610 	 *    EBX enumerates the size (in bytes) required by
611 	 *    the XSAVES instruction for an XSAVE area
612 	 *    containing all the state components
613 	 *    corresponding to bits currently set in
614 	 *    XCR0 | IA32_XSS.
615 	 */
616 	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
617 	return ebx;
618 }
619 
620 /*
621  * Get the total size of the enabled xstates without the independent supervisor
622  * features.
623  */
624 static unsigned int __init get_xsaves_size_no_independent(void)
625 {
626 	u64 mask = xfeatures_mask_independent();
627 	unsigned int size;
628 
629 	if (!mask)
630 		return get_xsaves_size();
631 
632 	/* Disable independent features. */
633 	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
634 
635 	/*
636 	 * Ask the hardware what size is required of the buffer.
637 	 * This is the size required for the task->fpu buffer.
638 	 */
639 	size = get_xsaves_size();
640 
641 	/* Re-enable independent features so XSAVES will work on them again. */
642 	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
643 
644 	return size;
645 }
646 
647 static unsigned int __init get_xsave_size(void)
648 {
649 	unsigned int eax, ebx, ecx, edx;
650 	/*
651 	 * - CPUID function 0DH, sub-function 0:
652 	 *    EBX enumerates the size (in bytes) required by
653 	 *    the XSAVE instruction for an XSAVE area
654 	 *    containing all the *user* state components
655 	 *    corresponding to bits currently set in XCR0.
656 	 */
657 	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
658 	return ebx;
659 }
660 
661 /*
662  * Will the runtime-enumerated 'xstate_size' fit in the init
663  * task's statically-allocated buffer?
664  */
665 static bool is_supported_xstate_size(unsigned int test_xstate_size)
666 {
667 	if (test_xstate_size <= sizeof(union fpregs_state))
668 		return true;
669 
670 	pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
671 			sizeof(union fpregs_state), test_xstate_size);
672 	return false;
673 }
674 
675 static int __init init_xstate_size(void)
676 {
677 	/* Recompute the context size for enabled features: */
678 	unsigned int possible_xstate_size;
679 	unsigned int xsave_size;
680 
681 	xsave_size = get_xsave_size();
682 
683 	if (boot_cpu_has(X86_FEATURE_XSAVES))
684 		possible_xstate_size = get_xsaves_size_no_independent();
685 	else
686 		possible_xstate_size = xsave_size;
687 
688 	/* Ensure we have the space to store all enabled: */
689 	if (!is_supported_xstate_size(possible_xstate_size))
690 		return -EINVAL;
691 
692 	/*
693 	 * The size is OK and we are definitely going to use xsave;
694 	 * make it known to the world that we need more space.
695 	 */
696 	fpu_kernel_xstate_size = possible_xstate_size;
697 	do_extra_xstate_size_checks();
698 
699 	/*
700 	 * User space is always in standard format.
701 	 */
702 	fpu_user_xstate_size = xsave_size;
703 	return 0;
704 }
705 
706 /*
707  * We enabled the XSAVE hardware, but something went wrong and
708  * we can not use it.  Disable it.
709  */
710 static void fpu__init_disable_system_xstate(void)
711 {
712 	xfeatures_mask_all = 0;
713 	cr4_clear_bits(X86_CR4_OSXSAVE);
714 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
715 }
716 
717 /*
718  * Enable and initialize the xsave feature.
719  * Called once per system bootup.
720  */
721 void __init fpu__init_system_xstate(void)
722 {
723 	unsigned int eax, ebx, ecx, edx;
724 	static int on_boot_cpu __initdata = 1;
725 	u64 xfeatures;
726 	int err;
727 	int i;
728 
729 	WARN_ON_FPU(!on_boot_cpu);
730 	on_boot_cpu = 0;
731 
732 	if (!boot_cpu_has(X86_FEATURE_FPU)) {
733 		pr_info("x86/fpu: No FPU detected\n");
734 		return;
735 	}
736 
737 	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
738 		pr_info("x86/fpu: x87 FPU will use %s\n",
739 			boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
740 		return;
741 	}
742 
743 	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
744 		WARN_ON_FPU(1);
745 		return;
746 	}
747 
748 	/*
749 	 * Find user xstates supported by the processor.
750 	 */
751 	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
752 	xfeatures_mask_all = eax + ((u64)edx << 32);
753 
754 	/*
755 	 * Find supervisor xstates supported by the processor.
756 	 */
757 	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
758 	xfeatures_mask_all |= ecx + ((u64)edx << 32);
759 
760 	if ((xfeatures_mask_uabi() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
761 		/*
762 		 * This indicates that something really unexpected happened
763 		 * with the enumeration.  Disable XSAVE and try to continue
764 		 * booting without it.  This is too early to BUG().
765 		 */
766 		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n",
767 		       xfeatures_mask_all);
768 		goto out_disable;
769 	}
770 
771 	/*
772 	 * Clear XSAVE features that are disabled in the normal CPUID.
773 	 */
774 	for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
775 		if (!boot_cpu_has(xsave_cpuid_features[i]))
776 			xfeatures_mask_all &= ~BIT_ULL(i);
777 	}
778 
779 	xfeatures_mask_all &= XFEATURE_MASK_USER_SUPPORTED |
780 			      XFEATURE_MASK_SUPERVISOR_SUPPORTED;
781 
782 	/* Store it for paranoia check at the end */
783 	xfeatures = xfeatures_mask_all;
784 
785 	/* Enable xstate instructions to be able to continue with initialization: */
786 	fpu__init_cpu_xstate();
787 	err = init_xstate_size();
788 	if (err)
789 		goto out_disable;
790 
791 	/*
792 	 * Update info used for ptrace frames; use standard-format size and no
793 	 * supervisor xstates:
794 	 */
795 	update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_uabi());
796 
797 	fpu__init_prepare_fx_sw_frame();
798 	setup_init_fpu_buf();
799 	setup_xstate_comp_offsets();
800 	setup_supervisor_only_offsets();
801 
802 	/*
803 	 * Paranoia check whether something in the setup modified the
804 	 * xfeatures mask.
805 	 */
806 	if (xfeatures != xfeatures_mask_all) {
807 		pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n",
808 		       xfeatures, xfeatures_mask_all);
809 		goto out_disable;
810 	}
811 
812 	print_xstate_offset_size();
813 	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
814 		xfeatures_mask_all,
815 		fpu_kernel_xstate_size,
816 		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
817 	return;
818 
819 out_disable:
820 	/* something went wrong, try to boot without any XSAVE support */
821 	fpu__init_disable_system_xstate();
822 }
823 
824 /*
825  * Restore minimal FPU state after suspend:
826  */
827 void fpu__resume_cpu(void)
828 {
829 	/*
830 	 * Restore XCR0 on xsave capable CPUs:
831 	 */
832 	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
833 		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_uabi());
834 
835 	/*
836 	 * Restore IA32_XSS. The same CPUID bit enumerates support
837 	 * of XSAVES and MSR_IA32_XSS.
838 	 */
839 	if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
840 		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor()  |
841 				     xfeatures_mask_independent());
842 	}
843 }
844 
845 /*
846  * Given an xstate feature nr, calculate where in the xsave
847  * buffer the state is.  Callers should ensure that the buffer
848  * is valid.
849  */
850 static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
851 {
852 	if (!xfeature_enabled(xfeature_nr)) {
853 		WARN_ON_FPU(1);
854 		return NULL;
855 	}
856 
857 	return (void *)xsave + xstate_comp_offsets[xfeature_nr];
858 }
859 /*
860  * Given the xsave area and a state inside, this function returns the
861  * address of the state.
862  *
863  * This is the API that is called to get the address of an xstate in
864  * either the standard format or the compacted format of the xsave area.
865  *
866  * Note that if there is no data for the field in the xsave buffer
867  * this will return NULL.
868  *
869  * Inputs:
870  *	xsave: the thread's storage area for all FPU data
871  *	xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
872  *	XFEATURE_SSE, etc...)
873  * Output:
874  *	address of the state in the xsave area, or NULL if the
875  *	field is not present in the xsave buffer.
876  */
877 void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
878 {
879 	/*
880 	 * Do we even *have* xsave state?
881 	 */
882 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
883 		return NULL;
884 
885 	/*
886 	 * We should not ever be requesting features that we
887 	 * have not enabled.
888 	 */
889 	WARN_ONCE(!(xfeatures_mask_all & BIT_ULL(xfeature_nr)),
890 		  "get of unsupported state");
891 	/*
892 	 * This assumes that the last 'xsave*' instruction
893 	 * requested that 'xfeature_nr' be saved.
894 	 * If it did not, we might be seeing an old value
895 	 * of the field in the buffer.
896 	 *
897 	 * This can happen because the last 'xsave' did not
898 	 * request that this feature be saved (unlikely)
899 	 * or because the "init optimization" caused it
900 	 * to not be saved.
901 	 */
902 	if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
903 		return NULL;
904 
905 	return __raw_xsave_addr(xsave, xfeature_nr);
906 }
907 EXPORT_SYMBOL_GPL(get_xsave_addr);
908 
909 #ifdef CONFIG_ARCH_HAS_PKEYS
910 
911 /*
912  * This will go out and modify the PKRU register to set the access
913  * rights for @pkey to @init_val.
914  */
915 int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
916 			      unsigned long init_val)
917 {
918 	u32 old_pkru, new_pkru_bits = 0;
919 	int pkey_shift;
920 
921 	/*
922 	 * This check implies XSAVE support.  OSPKE only gets
923 	 * set if we enable XSAVE and we enable PKU in XCR0.
924 	 */
925 	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
926 		return -EINVAL;
927 
928 	/*
929 	 * This code should only be called with valid 'pkey'
930 	 * values originating from in-kernel users.  Complain
931 	 * if a bad value is observed.
932 	 */
933 	if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
934 		return -EINVAL;
935 
936 	/* Set the bits we need in PKRU:  */
937 	if (init_val & PKEY_DISABLE_ACCESS)
938 		new_pkru_bits |= PKRU_AD_BIT;
939 	if (init_val & PKEY_DISABLE_WRITE)
940 		new_pkru_bits |= PKRU_WD_BIT;
941 
942 	/* Shift the bits in to the correct place in PKRU for pkey: */
943 	pkey_shift = pkey * PKRU_BITS_PER_PKEY;
944 	new_pkru_bits <<= pkey_shift;
945 
946 	/* Get old PKRU and mask off any old bits in place: */
947 	old_pkru = read_pkru();
948 	old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
949 
950 	/* Write old part along with new part: */
951 	write_pkru(old_pkru | new_pkru_bits);
952 
953 	return 0;
954 }
955 #endif /* CONFIG_ARCH_HAS_PKEYS */
956 
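/*
 * Copy one xstate component to the target buffer, either from the saved
 * state or, if 'from_xstate' is false, from the corresponding init state.
 */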
957 static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
958 			 void *init_xstate, unsigned int size)
959 {
960 	membuf_write(to, from_xstate ? xstate : init_xstate, size);
961 }
962 
963 /**
964  * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
965  * @to:		membuf descriptor
966  * @tsk:	The task from which to copy the saved xstate
967  * @copy_mode:	The requested copy mode
968  *
969  * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
970  * format, i.e. from the kernel internal hardware dependent storage format
971  * to the requested @copy_mode. UABI XSTATE is always uncompacted!
972  *
973  * It supports partial copy but @to.pos always starts from zero.
974  */
975 void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
976 			     enum xstate_copy_mode copy_mode)
977 {
978 	const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
979 	struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
980 	struct xregs_state *xinit = &init_fpstate.xsave;
981 	struct xstate_header header;
982 	unsigned int zerofrom;
983 	int i;
984 
985 	memset(&header, 0, sizeof(header));
986 	header.xfeatures = xsave->header.xfeatures;
987 
988 	/* Mask out the feature bits depending on copy mode */
989 	switch (copy_mode) {
990 	case XSTATE_COPY_FP:
991 		header.xfeatures &= XFEATURE_MASK_FP;
992 		break;
993 
994 	case XSTATE_COPY_FX:
995 		header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE;
996 		break;
997 
998 	case XSTATE_COPY_XSAVE:
999 		header.xfeatures &= xfeatures_mask_uabi();
1000 		break;
1001 	}
1002 
1003 	/* Copy FP state up to MXCSR */
1004 	copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
1005 		     &xinit->i387, off_mxcsr);
1006 
1007 	/* Copy MXCSR when SSE or YMM are set in the feature mask */
1008 	copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
1009 		     &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
1010 		     MXCSR_AND_FLAGS_SIZE);
1011 
1012 	/* Copy the remaining FP state */
1013 	copy_feature(header.xfeatures & XFEATURE_MASK_FP,
1014 		     &to, &xsave->i387.st_space, &xinit->i387.st_space,
1015 		     sizeof(xsave->i387.st_space));
1016 
1017 	/* Copy the SSE state - shared with YMM, but independently managed */
1018 	copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
1019 		     &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
1020 		     sizeof(xsave->i387.xmm_space));
1021 
1022 	if (copy_mode != XSTATE_COPY_XSAVE)
1023 		goto out;
1024 
1025 	/* Zero the padding area */
1026 	membuf_zero(&to, sizeof(xsave->i387.padding));
1027 
1028 	/* Copy xsave->i387.sw_reserved */
1029 	membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));
1030 
1031 	/* Copy the user space relevant state of @xsave->header */
1032 	membuf_write(&to, &header, sizeof(header));
1033 
1034 	zerofrom = offsetof(struct xregs_state, extended_state_area);
1035 
1036 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
1037 		/*
1038 		 * The ptrace buffer is in non-compacted XSAVE format.
1039 		 * In non-compacted format disabled features still occupy
1040 		 * state space, but there is no state to copy from in the
1041 		 * compacted init_fpstate. The gap tracking will zero this
1042 		 * later.
1043 		 */
1044 		if (!(xfeatures_mask_uabi() & BIT_ULL(i)))
1045 			continue;
1046 
1047 		/*
1048 		 * If there was a feature or alignment gap, zero the space
1049 		 * in the destination buffer.
1050 		 */
1051 		if (zerofrom < xstate_offsets[i])
1052 			membuf_zero(&to, xstate_offsets[i] - zerofrom);
1053 
1054 		if (i == XFEATURE_PKRU) {
1055 			struct pkru_state pkru = {0};
1056 			/*
1057 			 * PKRU is not necessarily up to date in the
1058 			 * thread's XSAVE buffer.  Fill this part from the
1059 			 * per-thread storage.
1060 			 */
1061 			pkru.pkru = tsk->thread.pkru;
1062 			membuf_write(&to, &pkru, sizeof(pkru));
1063 		} else {
1064 			copy_feature(header.xfeatures & BIT_ULL(i), &to,
1065 				     __raw_xsave_addr(xsave, i),
1066 				     __raw_xsave_addr(xinit, i),
1067 				     xstate_sizes[i]);
1068 		}
1069 		/*
1070 		 * Keep track of the last copied state in the non-compacted
1071 		 * target buffer for gap zeroing.
1072 		 */
1073 		zerofrom = xstate_offsets[i] + xstate_sizes[i];
1074 	}
1075 
1076 out:
1077 	if (to.left)
1078 		membuf_zero(&to, to.left);
1079 }
1080 
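/*
 * Copy 'size' bytes at 'offset' from whichever source buffer was supplied:
 * a kernel buffer ('kbuf') or a user space buffer ('ubuf').
 */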
1081 static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
1082 			    const void *kbuf, const void __user *ubuf)
1083 {
1084 	if (kbuf) {
1085 		memcpy(dst, kbuf + offset, size);
1086 	} else {
1087 		if (copy_from_user(dst, ubuf + offset, size))
1088 			return -EFAULT;
1089 	}
1090 	return 0;
1091 }
1092 
1093 
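/*
 * Convert a UABI (non-compacted) xstate buffer, supplied either from the
 * kernel ('kbuf') or from user space ('ubuf'), into the kernel's xsave
 * buffer after validating the xstate header and, when FP/SSE/YMM state is
 * supplied, MXCSR.
 */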
1094 static int copy_uabi_to_xstate(struct xregs_state *xsave, const void *kbuf,
1095 			       const void __user *ubuf)
1096 {
1097 	unsigned int offset, size;
1098 	struct xstate_header hdr;
1099 	u64 mask;
1100 	int i;
1101 
1102 	offset = offsetof(struct xregs_state, header);
1103 	if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf))
1104 		return -EFAULT;
1105 
1106 	if (validate_user_xstate_header(&hdr))
1107 		return -EINVAL;
1108 
1109 	/* Validate MXCSR when any of the related features is in use */
1110 	mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
1111 	if (hdr.xfeatures & mask) {
1112 		u32 mxcsr[2];
1113 
1114 		offset = offsetof(struct fxregs_state, mxcsr);
1115 		if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf))
1116 			return -EFAULT;
1117 
1118 		/* Reserved bits in MXCSR must be zero. */
1119 		if (mxcsr[0] & ~mxcsr_feature_mask)
1120 			return -EINVAL;
1121 
1122 		/* SSE and YMM require MXCSR even when FP is not in use. */
1123 		if (!(hdr.xfeatures & XFEATURE_MASK_FP)) {
1124 			xsave->i387.mxcsr = mxcsr[0];
1125 			xsave->i387.mxcsr_mask = mxcsr[1];
1126 		}
1127 	}
1128 
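	/*
	 * Copy each user state component supplied in 'hdr.xfeatures' from its
	 * standard-format offset in the source buffer into its slot in the
	 * kernel's xsave buffer.
	 */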
1129 	for (i = 0; i < XFEATURE_MAX; i++) {
1130 		u64 mask = ((u64)1 << i);
1131 
1132 		if (hdr.xfeatures & mask) {
1133 			void *dst = __raw_xsave_addr(xsave, i);
1134 
1135 			offset = xstate_offsets[i];
1136 			size = xstate_sizes[i];
1137 
1138 			if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
1139 				return -EFAULT;
1140 		}
1141 	}
1142 
1143 	/*
1144 	 * The state that came in from userspace was user-state only.
1145 	 * Mask all the user states out of 'xfeatures':
1146 	 */
1147 	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;
1148 
1149 	/*
1150 	 * Add back in the features that came in from userspace:
1151 	 */
1152 	xsave->header.xfeatures |= hdr.xfeatures;
1153 
1154 	return 0;
1155 }
1156 
1157 /*
1158  * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
1159  * format and copy to the target thread. This is called from
1160  * xstateregs_set().
1161  */
1162 int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
1163 {
1164 	return copy_uabi_to_xstate(xsave, kbuf, NULL);
1165 }
1166 
1167 /*
1168  * Convert from a sigreturn standard-format user-space buffer to kernel
1169  * XSAVE[S] format and copy to the target thread. This is called from the
1170  * sigreturn() and rt_sigreturn() system calls.
1171  */
1172 int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave,
1173 				      const void __user *ubuf)
1174 {
1175 	return copy_uabi_to_xstate(xsave, NULL, ubuf);
1176 }
1177 
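/*
 * Sanity-check a feature mask before handing it to XSAVES or XRSTORS:
 * XSAVES must be available, the mask must not be empty, and it must be
 * either a subset of the independent features or a subset of the
 * task->fpstate related features.
 */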
1178 static bool validate_xsaves_xrstors(u64 mask)
1179 {
1180 	u64 xchk;
1181 
1182 	if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES)))
1183 		return false;
1184 	/*
1185 	 * Validate that this is either a task->fpstate related component
1186 	 * subset or an independent one.
1187 	 */
1188 	if (mask & xfeatures_mask_independent())
1189 		xchk = ~xfeatures_mask_independent();
1190 	else
1191 		xchk = ~xfeatures_mask_all;
1192 
1193 	if (WARN_ON_ONCE(!mask || mask & xchk))
1194 		return false;
1195 
1196 	return true;
1197 }
1198 
1199 /**
1200  * xsaves - Save selected components to a kernel xstate buffer
1201  * @xstate:	Pointer to the buffer
1202  * @mask:	Feature mask to select the components to save
1203  *
1204  * The @xstate buffer must be 64 byte aligned and correctly initialized as
1205  * XSAVES does not write the full xstate header. Before first use the
1206  * buffer should be zeroed otherwise a consecutive XRSTORS from that buffer
1207  * can #GP.
1208  *
1209  * The feature mask must either be a subset of the independent features or
1210  * a subset of the task->fpstate related features.
1211  */
1212 void xsaves(struct xregs_state *xstate, u64 mask)
1213 {
1214 	int err;
1215 
1216 	if (!validate_xsaves_xrstors(mask))
1217 		return;
1218 
1219 	XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err);
1220 	WARN_ON_ONCE(err);
1221 }
1222 
1223 /**
1224  * xrstors - Restore selected components from a kernel xstate buffer
1225  * @xstate:	Pointer to the buffer
1226  * @mask:	Feature mask to select the components to restore
1227  *
1228  * The @xstate buffer must be 64 byte aligned and correctly initialized
1229  * otherwise XRSTORS from that buffer can #GP.
1230  *
1231  * Proper usage is to restore the state which was saved with
1232  * xsaves() into @xstate.
1233  *
1234  * The feature mask must either be a subset of the independent features or
1235  * a subset of the task->fpstate related features.
1236  */
1237 void xrstors(struct xregs_state *xstate, u64 mask)
1238 {
1239 	int err;
1240 
1241 	if (!validate_xsaves_xrstors(mask))
1242 		return;
1243 
1244 	XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err);
1245 	WARN_ON_ONCE(err);
1246 }
1247 
1248 #ifdef CONFIG_PROC_PID_ARCH_STATUS
1249 /*
1250  * Report the amount of time elapsed in milliseconds since the last AVX512
1251  * use in the task.
1252  */
1253 static void avx512_status(struct seq_file *m, struct task_struct *task)
1254 {
1255 	unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
1256 	long delta;
1257 
1258 	if (!timestamp) {
1259 		/*
1260 		 * Report -1 if no AVX512 usage
1261 		 */
1262 		delta = -1;
1263 	} else {
1264 		delta = (long)(jiffies - timestamp);
1265 		/*
1266 		 * Cap to LONG_MAX if time difference > LONG_MAX
1267 		 */
1268 		if (delta < 0)
1269 			delta = LONG_MAX;
1270 		delta = jiffies_to_msecs(delta);
1271 	}
1272 
1273 	seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
1274 	seq_putc(m, '\n');
1275 }
1276 
1277 /*
1278  * Report architecture specific information
1279  */
1280 int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
1281 			struct pid *pid, struct task_struct *task)
1282 {
1283 	/*
1284 	 * Report AVX512 state if the processor and the build options support it.
1285 	 */
1286 	if (cpu_feature_enabled(X86_FEATURE_AVX512F))
1287 		avx512_status(m, task);
1288 
1289 	return 0;
1290 }
1291 #endif /* CONFIG_PROC_PID_ARCH_STATUS */
1292