/* xref: /openbmc/linux/tools/testing/selftests/x86/amx.c (revision f21e49be) */
1 // SPDX-License-Identifier: GPL-2.0
2 
#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <x86intrin.h>

#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <sys/syscall.h>
#include <sys/wait.h>
19 
20 #ifndef __x86_64__
21 # error This test is 64-bit only
22 #endif
23 
/* Fixed layout of a non-compacted XSAVE area: 512-byte legacy region,
 * then a 64-byte header. */
#define XSAVE_HDR_OFFSET	512
#define XSAVE_HDR_SIZE		64

/*
 * An XSAVE area: legacy FP/SSE state, the XSAVE header (XSTATE_BV et al.),
 * then variable-length extended state components.  The anonymous union lets
 * callers also address the whole area as raw 'bytes'.  The [0]-sized arrays
 * are a GNU extension used for the variable-length tails.
 */
struct xsave_buffer {
	union {
		struct {
			char legacy[XSAVE_HDR_OFFSET];
			char header[XSAVE_HDR_SIZE];
			char extended[0];
		};
		char bytes[0];
	};
};
37 
/*
 * Read extended control register 'index' (XCR0 for index 0) with the
 * XGETBV instruction.  The result arrives in EDX:EAX and is combined
 * into a single 64-bit value.
 */
static inline uint64_t xgetbv(uint32_t index)
{
	uint32_t eax, edx;

	asm volatile("xgetbv;"
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((uint64_t)edx << 32);
}
47 
/*
 * Execute CPUID with the leaf taken from *eax and the subleaf from *ecx;
 * all four output registers are written back through the pointers.
 */
static inline void cpuid(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	asm volatile("cpuid;"
		     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		     : "0" (*eax), "2" (*ecx));
}
54 
/*
 * Save the user states selected by the requested-feature bitmap 'rfbm'
 * (passed in EDX:EAX per the ISA) into 'xbuf' via XSAVE.  The "memory"
 * clobber tells the compiler the buffer is written by the instruction.
 */
static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
{
	uint32_t rfbm_lo = rfbm;
	uint32_t rfbm_hi = rfbm >> 32;

	asm volatile("xsave (%%rdi)"
		     : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi)
		     : "memory");
}
64 
65 static inline void xrstor(struct xsave_buffer *xbuf, uint64_t rfbm)
66 {
67 	uint32_t rfbm_lo = rfbm;
68 	uint32_t rfbm_hi = rfbm >> 32;
69 
70 	asm volatile("xrstor (%%rdi)"
71 		     : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi));
72 }
73 
/* Print "[FAIL]" + msg + strerror(errno) and exit(1). */
/* err() exits and will not return */
#define fatal_error(msg, ...)	err(1, "[FAIL]\t" msg, ##__VA_ARGS__)
76 
77 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
78 		       int flags)
79 {
80 	struct sigaction sa;
81 
82 	memset(&sa, 0, sizeof(sa));
83 	sa.sa_sigaction = handler;
84 	sa.sa_flags = SA_SIGINFO | flags;
85 	sigemptyset(&sa.sa_mask);
86 	if (sigaction(sig, &sa, 0))
87 		fatal_error("sigaction");
88 }
89 
90 static void clearhandler(int sig)
91 {
92 	struct sigaction sa;
93 
94 	memset(&sa, 0, sizeof(sa));
95 	sa.sa_handler = SIG_DFL;
96 	sigemptyset(&sa.sa_mask);
97 	if (sigaction(sig, &sa, 0))
98 		fatal_error("sigaction");
99 }
100 
/* AMX xfeature numbers and their XCR0/XSTATE_BV mask bits: */
#define XFEATURE_XTILECFG	17
#define XFEATURE_XTILEDATA	18
#define XFEATURE_MASK_XTILECFG	(1 << XFEATURE_XTILECFG)
#define XFEATURE_MASK_XTILEDATA	(1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE	(XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)

/* CPUID.1:ECX feature bits: CPU XSAVE support and OS-enabled XSAVE: */
#define CPUID_LEAF1_ECX_XSAVE_MASK	(1 << 26)
#define CPUID_LEAF1_ECX_OSXSAVE_MASK	(1 << 27)
109 static inline void check_cpuid_xsave(void)
110 {
111 	uint32_t eax, ebx, ecx, edx;
112 
113 	/*
114 	 * CPUID.1:ECX.XSAVE[bit 26] enumerates general
115 	 * support for the XSAVE feature set, including
116 	 * XGETBV.
117 	 */
118 	eax = 1;
119 	ecx = 0;
120 	cpuid(&eax, &ebx, &ecx, &edx);
121 	if (!(ecx & CPUID_LEAF1_ECX_XSAVE_MASK))
122 		fatal_error("cpuid: no CPU xsave support");
123 	if (!(ecx & CPUID_LEAF1_ECX_OSXSAVE_MASK))
124 		fatal_error("cpuid: no OS xsave support");
125 }
126 
/* Size (bytes) of an XSAVE buffer covering all user states enabled in
 * XCR0, as enumerated by CPUID.0xD.0:EBX; set in check_cpuid_xtiledata(). */
static uint32_t xbuf_size;

/* Size and offset of the XTILEDATA component within the XSAVE buffer: */
static struct {
	uint32_t xbuf_offset;
	uint32_t size;
} xtiledata;

/* CPUID leaves/subleaves used for xstate and tile enumeration: */
#define CPUID_LEAF_XSTATE		0xd
#define CPUID_SUBLEAF_XSTATE_USER	0x0
#define TILE_CPUID			0x1d
#define TILE_PALETTE_ID			0x1
138 
139 static void check_cpuid_xtiledata(void)
140 {
141 	uint32_t eax, ebx, ecx, edx;
142 
143 	eax = CPUID_LEAF_XSTATE;
144 	ecx = CPUID_SUBLEAF_XSTATE_USER;
145 	cpuid(&eax, &ebx, &ecx, &edx);
146 
147 	/*
148 	 * EBX enumerates the size (in bytes) required by the XSAVE
149 	 * instruction for an XSAVE area containing all the user state
150 	 * components corresponding to bits currently set in XCR0.
151 	 *
152 	 * Stash that off so it can be used to allocate buffers later.
153 	 */
154 	xbuf_size = ebx;
155 
156 	eax = CPUID_LEAF_XSTATE;
157 	ecx = XFEATURE_XTILEDATA;
158 
159 	cpuid(&eax, &ebx, &ecx, &edx);
160 	/*
161 	 * eax: XTILEDATA state component size
162 	 * ebx: XTILEDATA state component offset in user buffer
163 	 */
164 	if (!eax || !ebx)
165 		fatal_error("xstate cpuid: invalid tile data size/offset: %d/%d",
166 				eax, ebx);
167 
168 	xtiledata.size	      = eax;
169 	xtiledata.xbuf_offset = ebx;
170 }
171 
172 /* The helpers for managing XSAVE buffer and tile states: */
173 
174 struct xsave_buffer *alloc_xbuf(void)
175 {
176 	struct xsave_buffer *xbuf;
177 
178 	/* XSAVE buffer should be 64B-aligned. */
179 	xbuf = aligned_alloc(64, xbuf_size);
180 	if (!xbuf)
181 		fatal_error("aligned_alloc()");
182 	return xbuf;
183 }
184 
185 static inline void clear_xstate_header(struct xsave_buffer *buffer)
186 {
187 	memset(&buffer->header, 0, sizeof(buffer->header));
188 }
189 
190 static inline uint64_t get_xstatebv(struct xsave_buffer *buffer)
191 {
192 	/* XSTATE_BV is at the beginning of the header: */
193 	return *(uint64_t *)&buffer->header;
194 }
195 
196 static inline void set_xstatebv(struct xsave_buffer *buffer, uint64_t bv)
197 {
198 	/* XSTATE_BV is at the beginning of the header: */
199 	*(uint64_t *)(&buffer->header) = bv;
200 }
201 
202 static void set_rand_tiledata(struct xsave_buffer *xbuf)
203 {
204 	int *ptr = (int *)&xbuf->bytes[xtiledata.xbuf_offset];
205 	int data;
206 	int i;
207 
208 	/*
209 	 * Ensure that 'data' is never 0.  This ensures that
210 	 * the registers are never in their initial configuration
211 	 * and thus never tracked as being in the init state.
212 	 */
213 	data = rand() | 1;
214 
215 	for (i = 0; i < xtiledata.size / sizeof(int); i++, ptr++)
216 		*ptr = data;
217 }
218 
/* XSAVE image used to stash known register state across the tests. */
struct xsave_buffer *stashed_xsave;

/* Allocate the stash buffer and start it with a zeroed XSAVE header.
 * (alloc_xbuf() exits on failure, so the NULL check is belt-and-braces.) */
static void init_stashed_xsave(void)
{
	stashed_xsave = alloc_xbuf();
	if (!stashed_xsave)
		fatal_error("failed to allocate stashed_xsave\n");
	clear_xstate_header(stashed_xsave);
}
228 
229 static void free_stashed_xsave(void)
230 {
231 	free(stashed_xsave);
232 }
233 
/* See 'struct _fpx_sw_bytes' at sigcontext.h */
#define SW_BYTES_OFFSET		464
/* N.B. The struct's field name varies so read from the offset. */
#define SW_BYTES_BV_OFFSET	(SW_BYTES_OFFSET + 8)

/* Locate the software-reserved bytes inside a legacy FP signal frame. */
static inline struct _fpx_sw_bytes *get_fpx_sw_bytes(void *buffer)
{
	char *base = buffer;	/* char * keeps the arithmetic standard C */

	return (struct _fpx_sw_bytes *)(base + SW_BYTES_OFFSET);
}

/* Fetch the 64-bit xfeatures bitmap recorded in the sw_bytes area. */
static inline uint64_t get_fpx_sw_bytes_features(void *buffer)
{
	char *base = buffer;

	return *(uint64_t *)(base + SW_BYTES_BV_OFFSET);
}
248 
/*
 * printf() is not async-signal-safe, so signal handlers append their
 * messages to this buffer; normal code prints and clears it later.
 */
#define SIGNAL_BUF_LEN 1000
char signal_message_buffer[SIGNAL_BUF_LEN];

/* Append 'msg' to the signal message buffer, truncating at capacity. */
void sig_print(char *msg)
{
	int remaining = SIGNAL_BUF_LEN - strlen(signal_message_buffer) - 1;

	strncat(signal_message_buffer, msg, remaining);
}
258 
/* Set by handle_noperm() so xrstor_safe() can tell a fault occurred. */
static volatile bool noperm_signaled;
/* Count of validation failures observed inside the handler. */
static int noperm_errs;
/*
 * Signal handler for when AMX is used but
 * permission has not been obtained.
 *
 * Validates that the SIGILL looks like an AMX-permission fault: correct
 * si_code, a signal XSAVE frame too small to hold XTILEDATA, and an
 * xfeatures bitmap without the XTILEDATA bit.  All output goes through
 * sig_print() because printf() is not async-signal-safe.
 */
static void handle_noperm(int sig, siginfo_t *si, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;
	void *xbuf = ctx->uc_mcontext.fpregs;
	struct _fpx_sw_bytes *sw_bytes;
	uint64_t features;

	/* Reset the signal message buffer: */
	signal_message_buffer[0] = '\0';
	sig_print("\tAt SIGILL handler,\n");

	if (si->si_code != ILL_ILLOPC) {
		noperm_errs++;
		sig_print("[FAIL]\tInvalid signal code.\n");
	} else {
		sig_print("[OK]\tValid signal code (ILL_ILLOPC).\n");
	}

	sw_bytes = get_fpx_sw_bytes(xbuf);
	/*
	 * Without permission, the signal XSAVE buffer should not
	 * have room for AMX register state (aka. xtiledata).
	 * Check that the size does not overlap with where xtiledata
	 * will reside.
	 *
	 * This also implies that no state components *PAST*
	 * XTILEDATA (features >=19) can be present in the buffer.
	 */
	if (sw_bytes->xstate_size <= xtiledata.xbuf_offset) {
		sig_print("[OK]\tValid xstate size\n");
	} else {
		noperm_errs++;
		sig_print("[FAIL]\tInvalid xstate size\n");
	}

	features = get_fpx_sw_bytes_features(xbuf);
	/*
	 * Without permission, the XTILEDATA feature
	 * bit should not be set.
	 */
	if ((features & XFEATURE_MASK_XTILEDATA) == 0) {
		sig_print("[OK]\tValid xstate mask\n");
	} else {
		noperm_errs++;
		sig_print("[FAIL]\tInvalid xstate mask\n");
	}

	noperm_signaled = true;
	/*
	 * NOTE(review): assumes the faulting instruction is the 3-byte
	 * "xrstor (%rdi)" emitted by xrstor() — confirm if other
	 * encodings are ever generated.
	 */
	ctx->uc_mcontext.gregs[REG_RIP] += 3; /* Skip the faulting XRSTOR */
}
315 
/* Return true if XRSTOR is successful; otherwise, false. */
/*
 * Attempt an XRSTOR that may legitimately fault: if the kernel has not
 * granted AMX permission, the instruction raises SIGILL and
 * handle_noperm() records the fault in noperm_signaled and skips the
 * instruction, so this returns false instead of crashing.
 */
static inline bool xrstor_safe(struct xsave_buffer *xbuf, uint64_t mask)
{
	noperm_signaled = false;
	xrstor(xbuf, mask);

	/* Print any messages produced by the signal code: */
	printf("%s", signal_message_buffer);
	/*
	 * Reset the buffer to make sure any future printing
	 * only outputs new messages:
	 */
	signal_message_buffer[0] = '\0';

	if (noperm_errs)
		fatal_error("saw %d errors in noperm signal handler\n", noperm_errs);

	return !noperm_signaled;
}
335 
/*
 * Use XRSTOR to populate the XTILEDATA registers with
 * random data.
 *
 * Return true if successful; otherwise, false.
 */
static inline bool load_rand_tiledata(struct xsave_buffer *xbuf)
{
	/* Mark only XTILEDATA valid in the image, then fill that region. */
	clear_xstate_header(xbuf);
	set_xstatebv(xbuf, XFEATURE_MASK_XTILEDATA);
	set_rand_tiledata(xbuf);
	/* Restore into the tile registers; false if it faulted (SIGILL). */
	return xrstor_safe(xbuf, XFEATURE_MASK_XTILEDATA);
}
349 
/* Return XTILEDATA to its initial configuration. */
/* An XRSTOR with XSTATE_BV cleared re-initializes the tile registers. */
static inline void init_xtiledata(void)
{
	clear_xstate_header(stashed_xsave);
	xrstor_safe(stashed_xsave, XFEATURE_MASK_XTILEDATA);
}
356 
/* Whether a test step is expected to fail or succeed: */
enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED };

/* arch_prctl() and sigaltstack() test */

/* arch_prctl() sub-commands for dynamic xstate permission: */
#define ARCH_GET_XCOMP_PERM	0x1022
#define ARCH_REQ_XCOMP_PERM	0x1023

/* Ask the kernel for permission to use XTILEDATA.  The result is
 * deliberately ignored here; callers that care use
 * validate_req_xcomp_perm() instead. */
static void req_xtiledata_perm(void)
{
	syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
}
368 
369 static void validate_req_xcomp_perm(enum expected_result exp)
370 {
371 	unsigned long bitmask;
372 	long rc;
373 
374 	rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
375 	if (exp == FAIL_EXPECTED) {
376 		if (rc) {
377 			printf("[OK]\tARCH_REQ_XCOMP_PERM saw expected failure..\n");
378 			return;
379 		}
380 
381 		fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected success.\n");
382 	} else if (rc) {
383 		fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected failure.\n");
384 	}
385 
386 	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
387 	if (rc) {
388 		fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
389 	} else if (bitmask & XFEATURE_MASK_XTILE) {
390 		printf("\tARCH_REQ_XCOMP_PERM is successful.\n");
391 	}
392 }
393 
/*
 * Try to load random tile data and check the outcome against the
 * expectation: with permission the XRSTOR must succeed; without it the
 * load must fault.  Mismatches are counted in noperm_errs (which
 * xrstor_safe() turns into a fatal error on its next call).
 */
static void validate_xcomp_perm(enum expected_result exp)
{
	bool load_success = load_rand_tiledata(stashed_xsave);

	if (exp == FAIL_EXPECTED) {
		if (load_success) {
			noperm_errs++;
			printf("[FAIL]\tLoad tiledata succeeded.\n");
		} else {
			printf("[OK]\tLoad tiledata failed.\n");
		}
	} else if (exp == SUCCESS_EXPECTED) {
		if (load_success) {
			printf("[OK]\tLoad tiledata succeeded.\n");
		} else {
			noperm_errs++;
			printf("[FAIL]\tLoad tiledata failed.\n");
		}
	}
}
414 
415 #ifndef AT_MINSIGSTKSZ
416 #  define AT_MINSIGSTKSZ	51
417 #endif
418 
419 static void *alloc_altstack(unsigned int size)
420 {
421 	void *altstack;
422 
423 	altstack = mmap(NULL, size, PROT_READ | PROT_WRITE,
424 			MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
425 
426 	if (altstack == MAP_FAILED)
427 		fatal_error("mmap() for altstack");
428 
429 	return altstack;
430 }
431 
432 static void setup_altstack(void *addr, unsigned long size, enum expected_result exp)
433 {
434 	stack_t ss;
435 	int rc;
436 
437 	memset(&ss, 0, sizeof(ss));
438 	ss.ss_size = size;
439 	ss.ss_sp = addr;
440 
441 	rc = sigaltstack(&ss, NULL);
442 
443 	if (exp == FAIL_EXPECTED) {
444 		if (rc) {
445 			printf("[OK]\tsigaltstack() failed.\n");
446 		} else {
447 			fatal_error("sigaltstack() succeeded unexpectedly.\n");
448 		}
449 	} else if (rc) {
450 		fatal_error("sigaltstack()");
451 	}
452 }
453 
/*
 * Exercise the interaction between sigaltstack() size and
 * ARCH_REQ_XCOMP_PERM: a too-small altstack must block the permission
 * request, a big-enough one must allow it, and once permission is held
 * the altstack must no longer be shrinkable below the required size.
 */
static void test_dynamic_sigaltstack(void)
{
	unsigned int small_size, enough_size;
	unsigned long minsigstksz;
	void *altstack;

	minsigstksz = getauxval(AT_MINSIGSTKSZ);
	printf("\tAT_MINSIGSTKSZ = %lu\n", minsigstksz);
	/*
	 * getauxval() itself can return 0 for failure or
	 * success.  But, in this case, AT_MINSIGSTKSZ
	 * will always return a >=0 value if implemented.
	 * Just check for 0.
	 */
	if (minsigstksz == 0) {
		printf("no support for AT_MINSIGSTKSZ, skipping sigaltstack tests\n");
		return;
	}

	enough_size = minsigstksz * 2;

	altstack = alloc_altstack(enough_size);
	printf("\tAllocate memory for altstack (%u bytes).\n", enough_size);

	/*
	 * Try setup_altstack() with a size which can not fit
	 * XTILEDATA.  ARCH_REQ_XCOMP_PERM should fail.
	 */
	small_size = minsigstksz - xtiledata.size;
	printf("\tAfter sigaltstack() with small size (%u bytes).\n", small_size);
	setup_altstack(altstack, small_size, SUCCESS_EXPECTED);
	validate_req_xcomp_perm(FAIL_EXPECTED);

	/*
	 * Try setup_altstack() with a size derived from
	 * AT_MINSIGSTKSZ.  It should be more than large enough
	 * and thus ARCH_REQ_XCOMP_PERM should succeed.
	 */
	printf("\tAfter sigaltstack() with enough size (%u bytes).\n", enough_size);
	setup_altstack(altstack, enough_size, SUCCESS_EXPECTED);
	validate_req_xcomp_perm(SUCCESS_EXPECTED);

	/*
	 * Try to coerce setup_altstack() to again accept a
	 * too-small altstack.  This ensures that big-enough
	 * sigaltstacks can not shrink to a too-small value
	 * once XTILEDATA permission is established.
	 */
	printf("\tThen, sigaltstack() with small size (%u bytes).\n", small_size);
	setup_altstack(altstack, small_size, FAIL_EXPECTED);
}
505 
/*
 * Run the ARCH_REQ_XCOMP_PERM / sigaltstack tests in a forked child so
 * that the permission state acquired here does not leak into the rest
 * of the test process.  The child forks again: the inner child runs the
 * permission tests, then forks a grandchild to verify that permission
 * and a compatible sigaltstack are inherited across fork().
 */
static void test_dynamic_state(void)
{
	pid_t parent, child, grandchild;

	/* NOTE(review): 'parent' holds fork()'s return value — in the
	 * parent process this is the child's PID, not the parent's. */
	parent = fork();
	if (parent < 0) {
		/* fork() failed */
		fatal_error("fork");
	} else if (parent > 0) {
		int status;
		/* fork() succeeded.  Now in the parent. */

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("arch_prctl test parent exit");
		return;
	}
	/* fork() succeeded.  Now in the child . */

	printf("[RUN]\tCheck ARCH_REQ_XCOMP_PERM around process fork() and sigaltack() test.\n");

	printf("\tFork a child.\n");
	child = fork();
	if (child < 0) {
		fatal_error("fork");
	} else if (child > 0) {
		int status;

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("arch_prctl test child exit");
		_exit(0);
	}

	/*
	 * The permission request should fail without an
	 * XTILEDATA-compatible signal stack
	 */
	printf("\tTest XCOMP_PERM at child.\n");
	validate_xcomp_perm(FAIL_EXPECTED);

	/*
	 * Set up an XTILEDATA-compatible signal stack and
	 * also obtain permission to populate XTILEDATA.
	 */
	printf("\tTest dynamic sigaltstack at child:\n");
	test_dynamic_sigaltstack();

	/* Ensure that XTILEDATA can be populated. */
	printf("\tTest XCOMP_PERM again at child.\n");
	validate_xcomp_perm(SUCCESS_EXPECTED);

	printf("\tFork a grandchild.\n");
	grandchild = fork();
	if (grandchild < 0) {
		/* fork() failed */
		fatal_error("fork");
	} else if (!grandchild) {
		/* fork() succeeded.  Now in the (grand)child. */
		printf("\tTest XCOMP_PERM at grandchild.\n");

		/*
		 * Ensure that the grandchild inherited
		 * permission and a compatible sigaltstack:
		 */
		validate_xcomp_perm(SUCCESS_EXPECTED);
	} else {
		int status;
		/* fork() succeeded.  Now in the parent. */

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("fork test grandchild");
	}

	_exit(0);
}
583 
584 /*
585  * Save current register state and compare it to @xbuf1.'
586  *
587  * Returns false if @xbuf1 matches the registers.
588  * Returns true  if @xbuf1 differs from the registers.
589  */
590 static inline bool __validate_tiledata_regs(struct xsave_buffer *xbuf1)
591 {
592 	struct xsave_buffer *xbuf2;
593 	int ret;
594 
595 	xbuf2 = alloc_xbuf();
596 	if (!xbuf2)
597 		fatal_error("failed to allocate XSAVE buffer\n");
598 
599 	xsave(xbuf2, XFEATURE_MASK_XTILEDATA);
600 	ret = memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
601 		     &xbuf2->bytes[xtiledata.xbuf_offset],
602 		     xtiledata.size);
603 
604 	free(xbuf2);
605 
606 	if (ret == 0)
607 		return false;
608 	return true;
609 }
610 
/* Die unless the live TILEDATA registers still match @xbuf. */
static inline void validate_tiledata_regs_same(struct xsave_buffer *xbuf)
{
	if (__validate_tiledata_regs(xbuf))
		fatal_error("TILEDATA registers changed");
}
618 
/* Die unless the live TILEDATA registers differ from @xbuf. */
static inline void validate_tiledata_regs_changed(struct xsave_buffer *xbuf)
{
	if (!__validate_tiledata_regs(xbuf))
		fatal_error("TILEDATA registers did not change");
}
626 
627 /* tiledata inheritance test */
628 
/*
 * Verify that tile register contents are NOT inherited across fork():
 * load random tiledata in a child, fork a grandchild, and check there
 * that the live registers differ from the pre-fork snapshot.  The test
 * runs in forked children so the main process's state is untouched.
 */
static void test_fork(void)
{
	pid_t child, grandchild;

	child = fork();
	if (child < 0) {
		/* fork() failed */
		fatal_error("fork");
	} else if (child > 0) {
		/* fork() succeeded.  Now in the parent. */
		int status;

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("fork test child");
		return;
	}
	/* fork() succeeded.  Now in the child. */
	printf("[RUN]\tCheck tile data inheritance.\n\tBefore fork(), load tiledata\n");

	load_rand_tiledata(stashed_xsave);

	grandchild = fork();
	if (grandchild < 0) {
		/* fork() failed */
		fatal_error("fork");
	} else if (grandchild > 0) {
		/* fork() succeeded.  Still in the first child. */
		int status;

		wait(&status);
		if (!WIFEXITED(status) || WEXITSTATUS(status))
			fatal_error("fork test grand child");
		_exit(0);
	}
	/* fork() succeeded.  Now in the (grand)child. */

	/*
	 * TILEDATA registers are not preserved across fork().
	 * Ensure that their value has changed:
	 */
	validate_tiledata_regs_changed(stashed_xsave);

	_exit(0);
}
674 
675 /* Context switching test */
676 
677 static struct _ctxtswtest_cfg {
678 	unsigned int iterations;
679 	unsigned int num_threads;
680 } ctxtswtest_config;
681 
682 struct futex_info {
683 	pthread_t thread;
684 	int nr;
685 	pthread_mutex_t mutex;
686 	struct futex_info *next;
687 };
688 
/*
 * Worker thread for the context-switch test.  Each iteration it waits
 * for its turn (its own mutex), verifies the tile registers survived
 * the context switches since its last turn, loads fresh random data,
 * and passes the baton to the next thread in the ring.
 */
static void *check_tiledata(void *info)
{
	struct futex_info *finfo = (struct futex_info *)info;
	struct xsave_buffer *xbuf;
	int i;

	xbuf = alloc_xbuf();
	if (!xbuf)
		fatal_error("unable to allocate XSAVE buffer");

	/*
	 * Load random data into 'xbuf' and then restore
	 * it to the tile registers themselves.
	 */
	load_rand_tiledata(xbuf);
	for (i = 0; i < ctxtswtest_config.iterations; i++) {
		pthread_mutex_lock(&finfo->mutex);

		/*
		 * Ensure the register values have not
		 * diverged from those recorded in 'xbuf'.
		 */
		validate_tiledata_regs_same(xbuf);

		/* Load new, random values into xbuf and registers */
		load_rand_tiledata(xbuf);

		/*
		 * The last thread's last unlock will be for
		 * thread 0's mutex.  However, thread 0 will
		 * have already exited the loop and the mutex
		 * will already be unlocked.
		 *
		 * Because this is not an ERRORCHECK mutex,
		 * that inconsistency will be silently ignored.
		 */
		pthread_mutex_unlock(&finfo->next->mutex);
	}

	free(xbuf);
	/*
	 * Return this thread's finfo, which is
	 * a unique value for this thread.
	 */
	return finfo;
}
735 
736 static int create_threads(int num, struct futex_info *finfo)
737 {
738 	int i;
739 
740 	for (i = 0; i < num; i++) {
741 		int next_nr;
742 
743 		finfo[i].nr = i;
744 		/*
745 		 * Thread 'i' will wait on this mutex to
746 		 * be unlocked.  Lock it immediately after
747 		 * initialization:
748 		 */
749 		pthread_mutex_init(&finfo[i].mutex, NULL);
750 		pthread_mutex_lock(&finfo[i].mutex);
751 
752 		next_nr = (i + 1) % num;
753 		finfo[i].next = &finfo[next_nr];
754 
755 		if (pthread_create(&finfo[i].thread, NULL, check_tiledata, &finfo[i]))
756 			fatal_error("pthread_create()");
757 	}
758 	return 0;
759 }
760 
/* Pin the whole process to CPU 0 so the worker threads are forced to
 * context-switch on a single CPU; exits on failure. */
static void affinitize_cpu0(void)
{
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);

	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
		fatal_error("sched_setaffinity to CPU 0");
}
771 
/*
 * Verify tile state survives context switches: pin everything to CPU 0,
 * start a ring of threads that take turns validating and reloading
 * their tile registers, kick the ring off, then join every thread and
 * check each returned its own 'finfo' sentinel.
 */
static void test_context_switch(void)
{
	struct futex_info *finfo;
	int i;

	/* Affinitize to one CPU to force context switches */
	affinitize_cpu0();

	req_xtiledata_perm();

	printf("[RUN]\tCheck tiledata context switches, %d iterations, %d threads.\n",
	       ctxtswtest_config.iterations,
	       ctxtswtest_config.num_threads);


	finfo = malloc(sizeof(*finfo) * ctxtswtest_config.num_threads);
	if (!finfo)
		fatal_error("malloc()");

	create_threads(ctxtswtest_config.num_threads, finfo);

	/*
	 * This thread wakes up thread 0
	 * Thread 0 will wake up 1
	 * Thread 1 will wake up 2
	 * ...
	 * the last thread will wake up 0
	 *
	 * ... this will repeat for the configured
	 * number of iterations.
	 */
	pthread_mutex_unlock(&finfo[0].mutex);

	/* Wait for all the threads to finish: */
	for (i = 0; i < ctxtswtest_config.num_threads; i++) {
		void *thread_retval;
		int rc;

		rc = pthread_join(finfo[i].thread, &thread_retval);

		if (rc)
			fatal_error("pthread_join() failed for thread %d err: %d\n",
					i, rc);

		if (thread_retval != &finfo[i])
			fatal_error("unexpected thread retval for thread %d: %p\n",
					i, thread_retval);

	}

	printf("[OK]\tNo incorrect case was found.\n");

	free(finfo);
}
826 
/*
 * Test entry point: verify XSAVE/AMX enumeration, install the SIGILL
 * handler used to catch permission faults, then run the dynamic-state,
 * fork-inheritance, and context-switch tests in order.
 */
int main(void)
{
	/* Check hardware availability at first */
	check_cpuid_xsave();
	check_cpuid_xtiledata();

	init_stashed_xsave();
	sethandler(SIGILL, handle_noperm, 0);

	/* Runs in forked children; permission state does not leak here. */
	test_dynamic_state();

	/* Request permission for the following tests */
	req_xtiledata_perm();

	test_fork();

	ctxtswtest_config.iterations = 10;
	ctxtswtest_config.num_threads = 5;
	test_context_switch();

	clearhandler(SIGILL);
	free_stashed_xsave();

	return 0;
}
852