xref: /openbmc/linux/tools/testing/selftests/kvm/aarch64/arch_timer.c (revision db44e1c871bcf6228b9447aada421088e036692a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch_timer.c - Tests the aarch64 timer IRQ functionality
 *
 * The test validates both the virtual and physical timer IRQs using
 * CVAL and TVAL registers. These constitute the four stages of the test.
 * The guest's main thread configures the timer interrupt for a stage
 * and waits for it to fire, with a timeout equal to the timer period,
 * then asserts that the interrupt arrived within that window.
 *
 * Upon receipt of an interrupt, the guest's interrupt handler validates
 * the interrupt by checking that the architectural state complies with
 * the specification.
 *
 * The test provides command-line options to configure the timer's
 * period (-p), number of vCPUs (-n), and iterations per stage (-i).
 * To stress-test the timer stack even more, an option to migrate the
 * vCPUs across pCPUs (-m), at a particular rate, is also provided.
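 *
 * Example invocation (the values shown are the built-in defaults):
 *   ./arch_timer -n 4 -i 5 -p 10 -m 2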
 *
 * Copyright (c) 2021, Google LLC.
 */
#define USE_GUEST_ASSERT_PRINTF 1

#define _GNU_SOURCE

#include <stdlib.h>
#include <pthread.h>
#include <linux/kvm.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <sys/sysinfo.h>

#include "kvm_util.h"
#include "processor.h"
#include "delay.h"
#include "arch_timer.h"
#include "gic.h"
#include "vgic.h"

#define NR_VCPUS_DEF			4
#define NR_TEST_ITERS_DEF		5
#define TIMER_TEST_PERIOD_MS_DEF	10
#define TIMER_TEST_ERR_MARGIN_US	100
#define TIMER_TEST_MIGRATION_FREQ_MS	2

struct test_args {
	int nr_vcpus;
	int nr_iter;
	int timer_period_ms;
	int migration_freq_ms;
	struct kvm_arm_counter_offset offset;
};

static struct test_args test_args = {
	.nr_vcpus = NR_VCPUS_DEF,
	.nr_iter = NR_TEST_ITERS_DEF,
	.timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
	.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
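	/*
	 * reserved == 1 is a sentinel meaning "no counter offset requested";
	 * parse_args() clears it when the user passes -o.
	 */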
	.offset = { .reserved = 1 },
};

#define msecs_to_usecs(msec)		((msec) * 1000LL)

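/* Guest physical addresses at which the test maps the vGIC frames */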
#define GICD_BASE_GPA			0x8000000ULL
#define GICR_BASE_GPA			0x80A0000ULL

enum guest_stage {
	GUEST_STAGE_VTIMER_CVAL = 1,
	GUEST_STAGE_VTIMER_TVAL,
	GUEST_STAGE_PTIMER_CVAL,
	GUEST_STAGE_PTIMER_TVAL,
	GUEST_STAGE_MAX,
};

/* Shared variables between host and guest */
struct test_vcpu_shared_data {
	int nr_iter;
	enum guest_stage guest_stage;
	uint64_t xcnt;
};

static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];

static int vtimer_irq, ptimer_irq;

static unsigned long *vcpu_done_map;
static pthread_mutex_t vcpu_done_map_lock;

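/*
 * Program the timer for the current stage: snapshot the counter, arm
 * CVAL/TVAL one timer period into the future, and enable the timer with
 * its interrupt unmasked.
 */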
static void
guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
{
	switch (shared_data->guest_stage) {
	case GUEST_STAGE_VTIMER_CVAL:
		timer_set_next_cval_ms(VIRTUAL, test_args.timer_period_ms);
		shared_data->xcnt = timer_get_cntct(VIRTUAL);
		timer_set_ctl(VIRTUAL, CTL_ENABLE);
		break;
	case GUEST_STAGE_VTIMER_TVAL:
		timer_set_next_tval_ms(VIRTUAL, test_args.timer_period_ms);
		shared_data->xcnt = timer_get_cntct(VIRTUAL);
		timer_set_ctl(VIRTUAL, CTL_ENABLE);
		break;
	case GUEST_STAGE_PTIMER_CVAL:
		timer_set_next_cval_ms(PHYSICAL, test_args.timer_period_ms);
		shared_data->xcnt = timer_get_cntct(PHYSICAL);
		timer_set_ctl(PHYSICAL, CTL_ENABLE);
		break;
	case GUEST_STAGE_PTIMER_TVAL:
		timer_set_next_tval_ms(PHYSICAL, test_args.timer_period_ms);
		shared_data->xcnt = timer_get_cntct(PHYSICAL);
		timer_set_ctl(PHYSICAL, CTL_ENABLE);
		break;
	default:
		GUEST_ASSERT(0);
	}
}

static void guest_validate_irq(unsigned int intid,
				struct test_vcpu_shared_data *shared_data)
{
	enum guest_stage stage = shared_data->guest_stage;
	uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
	unsigned long xctl = 0;
	unsigned int timer_irq = 0;
	unsigned int accessor;

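	/* Nothing to validate on a spurious interrupt */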
	if (intid == IAR_SPURIOUS)
		return;

	switch (stage) {
	case GUEST_STAGE_VTIMER_CVAL:
	case GUEST_STAGE_VTIMER_TVAL:
		accessor = VIRTUAL;
		timer_irq = vtimer_irq;
		break;
	case GUEST_STAGE_PTIMER_CVAL:
	case GUEST_STAGE_PTIMER_TVAL:
		accessor = PHYSICAL;
		timer_irq = ptimer_irq;
		break;
	default:
		GUEST_ASSERT(0);
		return;
	}

	xctl = timer_get_ctl(accessor);
	if ((xctl & CTL_IMASK) || !(xctl & CTL_ENABLE))
		return;

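	/*
	 * Mask the timer so its level-triggered IRQ doesn't fire again
	 * while the architectural state is sampled below.
	 */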
	timer_set_ctl(accessor, CTL_IMASK);
	xcnt = timer_get_cntct(accessor);
	cval = timer_get_cval(accessor);

	xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);

	/* Make sure we are dealing with the correct timer IRQ */
	GUEST_ASSERT_EQ(intid, timer_irq);

	/* Basic 'timer condition met' check */
	__GUEST_ASSERT(xcnt >= cval,
		       "xcnt = 0x%llx, cval = 0x%llx, xcnt_diff_us = 0x%llx",
		       xcnt, cval, xcnt_diff_us);
	__GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%llx", xcnt);

	WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}

static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int intid = gic_get_and_ack_irq();
	uint32_t cpu = guest_get_vcpuid();
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

	guest_validate_irq(intid, shared_data);

	gic_set_eoi(intid);
}

static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
				enum guest_stage stage)
{
	uint32_t irq_iter, config_iter;

	shared_data->guest_stage = stage;
	shared_data->nr_iter = 0;

	for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
		/* Setup the next interrupt */
		guest_configure_timer_action(shared_data);

		/* Setup a timeout for the interrupt to arrive */
		udelay(msecs_to_usecs(test_args.timer_period_ms) +
			TIMER_TEST_ERR_MARGIN_US);

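		/*
		 * The IRQ handler bumps nr_iter exactly once per validated
		 * interrupt, so exactly one more interrupt must have
		 * arrived within the timeout window.
		 */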
		irq_iter = READ_ONCE(shared_data->nr_iter);
		GUEST_ASSERT_EQ(config_iter + 1, irq_iter);
	}
}

static void guest_code(void)
{
	uint32_t cpu = guest_get_vcpuid();
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

	local_irq_disable();

	gic_init(GIC_V3, test_args.nr_vcpus,
		(void *)GICD_BASE_GPA, (void *)GICR_BASE_GPA);

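	/* Keep both timers masked until a test stage arms them */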
	timer_set_ctl(VIRTUAL, CTL_IMASK);
	timer_set_ctl(PHYSICAL, CTL_IMASK);

	gic_irq_enable(vtimer_irq);
	gic_irq_enable(ptimer_irq);
	local_irq_enable();

	guest_run_stage(shared_data, GUEST_STAGE_VTIMER_CVAL);
	guest_run_stage(shared_data, GUEST_STAGE_VTIMER_TVAL);
	guest_run_stage(shared_data, GUEST_STAGE_PTIMER_CVAL);
	guest_run_stage(shared_data, GUEST_STAGE_PTIMER_TVAL);

	GUEST_DONE();
}

static void *test_vcpu_run(void *arg)
{
	unsigned int vcpu_idx = (unsigned long)arg;
	struct ucall uc;
	struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
	struct kvm_vm *vm = vcpu->vm;
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];

	vcpu_run(vcpu);

	/* Currently, any exit from the guest is an indication of completion */
	pthread_mutex_lock(&vcpu_done_map_lock);
	__set_bit(vcpu_idx, vcpu_done_map);
	pthread_mutex_unlock(&vcpu_done_map_lock);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		sync_global_from_guest(vm, *shared_data);
		fprintf(stderr, "Guest assert failed, vcpu %u; stage: %u; iter: %u\n",
			vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
		REPORT_GUEST_ASSERT(uc);
		break;
	default:
		TEST_FAIL("Unexpected guest exit\n");
	}

	return NULL;
}

static uint32_t test_get_pcpu(void)
{
	uint32_t pcpu;
	unsigned int nproc_conf;
	cpu_set_t online_cpuset;

	nproc_conf = get_nprocs_conf();
	sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);

	/* Randomly find an available pCPU to place a vCPU on */
	do {
		pcpu = rand() % nproc_conf;
	} while (!CPU_ISSET(pcpu, &online_cpuset));

	return pcpu;
}

static int test_migrate_vcpu(unsigned int vcpu_idx)
{
	int ret;
	cpu_set_t cpuset;
	uint32_t new_pcpu = test_get_pcpu();

	CPU_ZERO(&cpuset);
	CPU_SET(new_pcpu, &cpuset);

	pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);

	ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
				     sizeof(cpuset), &cpuset);

	/* Allow the error where the vCPU thread is already finished */
	TEST_ASSERT(ret == 0 || ret == ESRCH,
		    "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
		    vcpu_idx, new_pcpu, ret);

	return ret;
}

static void *test_vcpu_migration(void *arg)
{
	unsigned int i, n_done;
	bool vcpu_done;

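	/*
	 * Periodically migrate each still-running vCPU to a randomly chosen
	 * pCPU, until every vCPU has reported completion.
	 */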
	do {
		usleep(msecs_to_usecs(test_args.migration_freq_ms));

		for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
			pthread_mutex_lock(&vcpu_done_map_lock);
			vcpu_done = test_bit(i, vcpu_done_map);
			pthread_mutex_unlock(&vcpu_done_map_lock);

			if (vcpu_done) {
				n_done++;
				continue;
			}

			test_migrate_vcpu(i);
		}
	} while (test_args.nr_vcpus != n_done);

	return NULL;
}

static void test_run(struct kvm_vm *vm)
{
	pthread_t pt_vcpu_migration;
	unsigned int i;
	int ret;

	pthread_mutex_init(&vcpu_done_map_lock, NULL);
	vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
	TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");

	for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
		ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
				     (void *)(unsigned long)i);
		TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
	}

	/* Spawn a thread to control the vCPU migrations */
	if (test_args.migration_freq_ms) {
		srand(time(NULL));

		ret = pthread_create(&pt_vcpu_migration, NULL,
					test_vcpu_migration, NULL);
		TEST_ASSERT(!ret, "Failed to create the migration pthread\n");
	}

	for (i = 0; i < test_args.nr_vcpus; i++)
		pthread_join(pt_vcpu_run[i], NULL);

	if (test_args.migration_freq_ms)
		pthread_join(pt_vcpu_migration, NULL);

	bitmap_free(vcpu_done_map);
}

static void test_init_timer_irq(struct kvm_vm *vm)
{
	/* The timer intid should be the same for all vCPUs, so query only vCPU-0 */
	vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
	vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);

	sync_global_to_guest(vm, ptimer_irq);
	sync_global_to_guest(vm, vtimer_irq);

	pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
}

static int gic_fd;

static struct kvm_vm *test_vm_create(void)
{
	struct kvm_vm *vm;
	unsigned int i;
	int nr_vcpus = test_args.nr_vcpus;

	vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);

	vm_init_descriptor_tables(vm);
	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);

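	/* Apply the user-supplied counter offset (-o), if one was requested */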
	if (!test_args.offset.reserved) {
		if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
			vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
		else
			TEST_FAIL("no support for global offset\n");
	}

	for (i = 0; i < nr_vcpus; i++)
		vcpu_init_descriptor_tables(vcpus[i]);

	test_init_timer_irq(vm);
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");

	/* Make all the test's cmdline args visible to the guest */
	sync_global_to_guest(vm, test_args);

	return vm;
}

static void test_vm_cleanup(struct kvm_vm *vm)
{
	close(gic_fd);
	kvm_vm_free(vm);
}

static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n"
		"\t\t[-m migration_freq_ms] [-o counter_offset]\n", name);
	pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
		NR_VCPUS_DEF, KVM_MAX_VCPUS);
	pr_info("\t-i: Number of iterations per stage (default: %u)\n",
		NR_TEST_ITERS_DEF);
	pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
		TIMER_TEST_PERIOD_MS_DEF);
	pr_info("\t-m: Interval (in ms) at which vCPUs are migrated to a different pCPU. 0 to turn off (default: %u)\n",
		TIMER_TEST_MIGRATION_FREQ_MS);
	pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n");
	pr_info("\t-h: print this help screen\n");
}

static bool parse_args(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "hn:i:p:m:o:")) != -1) {
		switch (opt) {
		case 'n':
			test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
				pr_info("Max allowed vCPUs: %u\n",
					KVM_MAX_VCPUS);
				goto err;
			}
			break;
		case 'i':
			test_args.nr_iter = atoi_positive("Number of iterations", optarg);
			break;
		case 'p':
			test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
			break;
		case 'm':
			test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
			break;
		case 'o':
			test_args.offset.counter_offset = strtol(optarg, NULL, 0);
			test_args.offset.reserved = 0;
			break;
		case 'h':
		default:
			goto err;
		}
	}

	return true;

err:
	test_print_help(argv[0]);
	return false;
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;

	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

	__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
		       "At least two physical CPUs needed for vCPU migration");

	vm = test_vm_create();
	test_run(vm);
	test_vm_cleanup(vm);

	return 0;
}