// SPDX-License-Identifier: GPL-2.0-only
/*
 * amx tests
 *
 * Copyright (C) 2021, Intel, Inc.
 *
 * Tests for amx #NM exception and save/restore.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#ifndef __x86_64__
# error This test is 64-bit only
#endif

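/*
 * Palette 1 (the only architecturally-defined AMX palette as of this
 * writing) provides eight 1KiB tile registers, TMM0-TMM7.  XSAVE_SIZE pads
 * the 8KiB of tile data with a page of headroom for the legacy region and
 * the XSAVE header.
 */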
#define NUM_TILES			8
#define TILE_SIZE			1024
#define XSAVE_SIZE			((NUM_TILES * TILE_SIZE) + PAGE_SIZE)

/* Tile configuration constants: */
#define PALETTE_TABLE_INDEX		1
#define MAX_TILES			16
#define RESERVED_BYTES			14

#define XSAVE_HDR_OFFSET		512

struct tile_config {
	u8  palette_id;
	u8  start_row;
	u8  reserved[RESERVED_BYTES];
	u16 colsb[MAX_TILES];
	u8  rows[MAX_TILES];
};

struct tile_data {
	u8 data[NUM_TILES * TILE_SIZE];
};

struct xtile_info {
	u16 bytes_per_tile;
	u16 bytes_per_row;
	u16 max_names;
	u16 max_rows;
	u32 xsave_offset;
	u32 xsave_size;
};

static struct xtile_info xtile;

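/*
 * The AMX instructions below are hand-encoded as raw bytes so that the test
 * builds even with assemblers that lack AMX support: LDTILECFG [rax],
 * TILELOADD tmm0,[rax+rdx*1] and TILERELEASE, respectively.
 */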
static inline void __ldtilecfg(void *cfg)
{
	asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00"
		     : : "a"(cfg));
}

static inline void __tileloadd(void *tile)
{
	asm volatile(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10"
		     : : "a"(tile), "d"(0));
}

static inline void __tilerelease(void)
{
	asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::);
}

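/*
 * XSAVEC takes the requested-feature bitmap (RFBM) in EDX:EAX and saves
 * state in compacted format, i.e. sets bit 63 of xcomp_bv in the header.
 */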
static inline void __xsavec(struct xstate *xstate, uint64_t rfbm)
{
	uint32_t rfbm_lo = rfbm;
	uint32_t rfbm_hi = rfbm >> 32;

	asm volatile("xsavec (%%rdi)"
		     : : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi)
		     : "memory");
}

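/*
 * The hardcoded values below (XTILEDATA at offset 2816, 8192 bytes total,
 * eight tiles of 1024 bytes, 16 rows of 64 bytes) correspond to palette 1
 * as enumerated by CPUID on existing AMX implementations; the test
 * deliberately asserts these exact values.
 */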
static void check_xtile_info(void)
{
	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0));
	GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0) <= XSAVE_SIZE);

	xtile.xsave_offset = this_cpu_property(X86_PROPERTY_XSTATE_TILE_OFFSET);
	GUEST_ASSERT(xtile.xsave_offset == 2816);
	xtile.xsave_size = this_cpu_property(X86_PROPERTY_XSTATE_TILE_SIZE);
	GUEST_ASSERT(xtile.xsave_size == 8192);
	GUEST_ASSERT(sizeof(struct tile_data) >= xtile.xsave_size);

	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_MAX_PALETTE_TABLES));
	GUEST_ASSERT(this_cpu_property(X86_PROPERTY_AMX_MAX_PALETTE_TABLES) >=
		     PALETTE_TABLE_INDEX);

	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_NR_TILE_REGS));
	xtile.max_names = this_cpu_property(X86_PROPERTY_AMX_NR_TILE_REGS);
	GUEST_ASSERT(xtile.max_names == 8);
	xtile.bytes_per_tile = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_TILE);
	GUEST_ASSERT(xtile.bytes_per_tile == 1024);
	xtile.bytes_per_row = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_ROW);
	GUEST_ASSERT(xtile.bytes_per_row == 64);
	xtile.max_rows = this_cpu_property(X86_PROPERTY_AMX_MAX_ROWS);
	GUEST_ASSERT(xtile.max_rows == 16);
}

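/* Configure every available tile register to its maximum geometry. */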
static void set_tilecfg(struct tile_config *cfg)
{
	int i;

	/* Only palette id 1 */
	cfg->palette_id = 1;
	for (i = 0; i < xtile.max_names; i++) {
		cfg->colsb[i] = xtile.bytes_per_row;
		cfg->rows[i] = xtile.max_rows;
	}
}

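/* Enable CR4.OSXSAVE, then enable both XTILE components (CFG and DATA) in XCR0. */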
static void init_regs(void)
{
	uint64_t cr4, xcr0;

	GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));

	/* turn on CR4.OSXSAVE */
	cr4 = get_cr4();
	cr4 |= X86_CR4_OSXSAVE;
	set_cr4(cr4);
	GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));

	xcr0 = xgetbv(0);
	xcr0 |= XFEATURE_MASK_XTILE;
	xsetbv(0x0, xcr0);
	GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE);
}

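/*
 * Guest flow: enable AMX (XFD=0), load a tile and sync to the host so that
 * userspace can verify save/restore, then re-arm XFD so the final TILELOADD
 * raises #NM, which the handler below clears before the instruction retries.
 */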
static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
						    struct tile_data *tiledata,
						    struct xstate *xstate)
{
	init_regs();
	check_xtile_info();
	GUEST_SYNC(1);

	/* xfd=0, enable amx */
	wrmsr(MSR_IA32_XFD, 0);
	GUEST_SYNC(2);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == 0);
	set_tilecfg(amx_cfg);
	__ldtilecfg(amx_cfg);
	GUEST_SYNC(3);
	/* Check save/restore when trapping to userspace */
	__tileloadd(tiledata);
	GUEST_SYNC(4);
	__tilerelease();
	GUEST_SYNC(5);
	/*
	 * After XSAVEC, XTILEDATA is cleared in the xstate_bv but is set in
	 * the xcomp_bv.
	 */
	xstate->header.xstate_bv = XFEATURE_MASK_XTILE_DATA;
	__xsavec(xstate, XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
	GUEST_ASSERT(xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA);

	/* xfd=0x40000, disable amx tiledata */
	wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILE_DATA);

	/*
	 * XTILEDATA is cleared in xstate_bv but set in xcomp_bv; this property
	 * remains the same even when amx tiledata is disabled by IA32_XFD.
	 */
	xstate->header.xstate_bv = XFEATURE_MASK_XTILE_DATA;
	__xsavec(xstate, XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
	GUEST_ASSERT(xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA);

	GUEST_SYNC(6);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	set_tilecfg(amx_cfg);
	__ldtilecfg(amx_cfg);
	/* Trigger #NM exception */
	__tileloadd(tiledata);
	GUEST_SYNC(10);

	GUEST_DONE();
}

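/*
 * #NM is a fault, so once the handler clears XFD the faulting TILELOADD in
 * guest_code is re-executed and succeeds.
 */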
void guest_nm_handler(struct ex_regs *regs)
{
	/* Check if #NM is triggered by XFEATURE_MASK_XTILE_DATA */
	GUEST_SYNC(7);
	GUEST_ASSERT(!(get_cr0() & X86_CR0_TS));
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	GUEST_SYNC(8);
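	/* Re-check after bouncing through the host to verify the MSRs survive. */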
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	/* Clear xfd_err */
	wrmsr(MSR_IA32_XFD_ERR, 0);
	/* xfd=0, enable amx */
	wrmsr(MSR_IA32_XFD, 0);
	GUEST_SYNC(9);
}

int main(int argc, char *argv[])
{
	struct kvm_regs regs1, regs2;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_x86_state *state;
	int xsave_restore_size;
	vm_vaddr_t amx_cfg, tiledata, xstate;
	struct ucall uc;
	u32 amx_offset;
	int ret;

	/*
	 * Note, all off-by-default features must be enabled before anything
	 * caches KVM_GET_SUPPORTED_CPUID, e.g. before using kvm_cpu_has().
	 */
	vm_xsave_require_permission(XFEATURE_MASK_XTILE_DATA);

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_AMX_TILE));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILECFG));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA_XFD));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	TEST_ASSERT(kvm_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE),
		    "KVM should enumerate max XSAVE size when XSAVE is supported");
	xsave_restore_size = kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE);

	vcpu_regs_get(vcpu, &regs1);

	/* Register #NM handler */
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);

	/* amx cfg for guest_code */
	amx_cfg = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());

	/* amx tiledata for guest_code */
	tiledata = vm_vaddr_alloc_pages(vm, 2);
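	/* Fill with a guaranteed nonzero byte so the memcmp below is meaningful. */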
	memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize());

	/* XSAVE state for guest_code */
	xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
	memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
	vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);

	for (;;) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			switch (uc.args[1]) {
			case 1:
			case 2:
			case 3:
			case 5:
			case 6:
			case 7:
			case 8:
				fprintf(stderr, "GUEST_SYNC(%ld)\n", uc.args[1]);
				break;
			case 4:
			case 10:
				fprintf(stderr,
					"GUEST_SYNC(%ld), check save/restore status\n",
					uc.args[1]);

				/*
				 * Compacted mode: XTILEDATA is the last
				 * component, so its offset is the total XSAVE
				 * area size minus the 8KiB of tile data.
				 */
				amx_offset = xsave_restore_size - NUM_TILES * TILE_SIZE;
				state = vcpu_save_state(vcpu);
				void *amx_start = (void *)state->xsave + amx_offset;
				void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
				/* Only check TMM0 register, 1 tile */
				ret = memcmp(amx_start, tiles_data, TILE_SIZE);
				TEST_ASSERT(ret == 0, "memcmp failed, ret=%d\n", ret);
				kvm_x86_state_cleanup(state);
				break;
			case 9:
				fprintf(stderr,
					"GUEST_SYNC(%ld), #NM exception and enable amx\n",
					uc.args[1]);
				break;
			}
			break;
		case UCALL_DONE:
			fprintf(stderr, "UCALL_DONE\n");
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

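		/*
		 * After every ucall, save the full vCPU state (including the
		 * AMX state), migrate it into a freshly created VM, and verify
		 * the GPRs survive the round trip.
		 */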
		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}
done:
	kvm_vm_free(vm);
}