// SPDX-License-Identifier: GPL-2.0-only
/*
 * psci_cpu_on_test - Test that the observable state of a vCPU targeted by the
 * CPU_ON PSCI call matches what the caller requested.
 *
 * Copyright (c) 2021 Google LLC.
 *
 * This is a regression test for a race between KVM servicing the PSCI CPU_ON
 * call and userspace reading the targeted vCPU's registers. It also exercises
 * KVM's handling of PSCI SYSTEM_SUSPEND and the KVM_SYSTEM_EVENT_SUSPEND exit.
 */

#define _GNU_SOURCE

#include <linux/psci.h>

#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

#define VCPU_ID_SOURCE 0
#define VCPU_ID_TARGET 1

#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
#define CPU_ON_CONTEXT_ID 0xdeadc0deul

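/*
 * Helpers for issuing PSCI calls from within the guest. KVM exposes PSCI to
 * its guests over the HVC conduit, so each wrapper funnels the call through
 * smccc_hvc() and returns whatever the hypervisor left in x0.
 */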
static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
			    uint64_t context_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
		  0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_affinity_info(uint64_t target_affinity,
				   uint64_t lowest_affinity_level)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
		  0, 0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
		  0, 0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_features(uint32_t func_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;
}

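/*
 * Power off a vCPU from userspace. KVM treats KVM_MP_STATE_STOPPED as the
 * PSCI "off" state, so the vCPU stays parked until a CPU_ON call (or a later
 * MP state change) brings it back.
 */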
static void vcpu_power_off(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_mp_state mp_state = {
		.mp_state = KVM_MP_STATE_STOPPED,
	};

	vcpu_set_mp_state(vm, vcpuid, &mp_state);
}

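/*
 * Create a VM with two vCPUs. KVM_ARM_PREFERRED_TARGET supplies a vCPU target
 * supported by the host, and setting KVM_ARM_VCPU_PSCI_0_2 opts the vCPUs in
 * to PSCI v0.2 (or newer) rather than the legacy v0.1 interface.
 */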
static struct kvm_vm *setup_vm(void *guest_code)
{
	struct kvm_vcpu_init init;
	struct kvm_vm *vm;

	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
	ucall_init(vm, NULL);

	vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);

	aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_code);
	aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_code);

	return vm;
}

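/* Run a vCPU until it exits to userspace, failing the test on a guest assertion. */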
static void enter_guest(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct ucall uc;

	vcpu_run(vm, vcpuid);
	if (get_ucall(vm, vcpuid, &uc) == UCALL_ABORT)
		TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__,
			  uc.args[1]);
}

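/*
 * Check the target vCPU's observable state against the CPU_ON request: the
 * PSCI spec requires the vCPU to enter at the requested address with the
 * context ID in x0.
 */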
static void assert_vcpu_reset(struct kvm_vm *vm, uint32_t vcpuid)
{
	uint64_t obs_pc, obs_x0;

	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &obs_pc);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[0]), &obs_x0);

	TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
		    "unexpected target cpu pc: %lx (expected: %lx)",
		    obs_pc, CPU_ON_ENTRY_ADDR);
	TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
		    "unexpected target context id: %lx (expected: %lx)",
		    obs_x0, CPU_ON_CONTEXT_ID);
}

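/*
 * Guest code for the source vCPU: power on the target with CPU_ON, then poll
 * AFFINITY_INFO until the target is reported as being on.
 */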
static void guest_test_cpu_on(uint64_t target_cpu)
{
	uint64_t target_state;

	GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));

	do {
		target_state = psci_affinity_info(target_cpu, 0);

		GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
			     (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF));
	} while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON);

	GUEST_DONE();
}

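/*
 * Host side of the CPU_ON test. The target vCPU is powered off and never run;
 * once the source vCPU reports completion, the target's PC and x0 are read
 * directly and must already reflect the CPU_ON request.
 */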
static void host_test_cpu_on(void)
{
	uint64_t target_mpidr;
	struct kvm_vm *vm;
	struct ucall uc;

	vm = setup_vm(guest_test_cpu_on);

	/* Make sure the target vCPU is already off when running the test. */
	vcpu_power_off(vm, VCPU_ID_TARGET);

	get_reg(vm, VCPU_ID_TARGET, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
	vcpu_args_set(vm, VCPU_ID_SOURCE, 1, target_mpidr & MPIDR_HWID_BITMASK);
	enter_guest(vm, VCPU_ID_SOURCE);

	if (get_ucall(vm, VCPU_ID_SOURCE, &uc) != UCALL_DONE)
		TEST_FAIL("Unhandled ucall: %lu", uc.cmd);

	assert_vcpu_reset(vm, VCPU_ID_TARGET);
	kvm_vm_free(vm);
}

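/*
 * Guest code for the SYSTEM_SUSPEND test: check that the call is discoverable
 * through PSCI_FEATURES, then request a suspend. A successful suspend exits
 * to userspace, so GUEST_SYNC() is only reached if the call returns an error.
 */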
static void guest_test_system_suspend(void)
{
	uint64_t ret;

	/* assert that SYSTEM_SUSPEND is discoverable */
	GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
	GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND));

	ret = psci_system_suspend(CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID);
	GUEST_SYNC(ret);
}

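/*
 * Host side of the SYSTEM_SUSPEND test: enable the opt-in capability, run the
 * source vCPU, and expect KVM to exit with a KVM_SYSTEM_EVENT_SUSPEND system
 * event instead of handling the suspend in the kernel.
 */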
static void host_test_system_suspend(void)
{
	struct kvm_run *run;
	struct kvm_vm *vm;

	vm = setup_vm(guest_test_system_suspend);
	vm_enable_cap(vm, &(struct kvm_enable_cap) {
		.cap = KVM_CAP_ARM_SYSTEM_SUSPEND,
	});

	vcpu_power_off(vm, VCPU_ID_TARGET);
	run = vcpu_state(vm, VCPU_ID_SOURCE);

	enter_guest(vm, VCPU_ID_SOURCE);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
		    "Unhandled exit reason: %u (%s)",
		    run->exit_reason, exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
		    "Unhandled system event: %u (expected: %u)",
		    run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);

	kvm_vm_free(vm);
}

int main(void)
{
	if (!kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND)) {
		print_skip("KVM_CAP_ARM_SYSTEM_SUSPEND not supported");
		exit(KSFT_SKIP);
	}

	host_test_cpu_on();
	host_test_system_suspend();
	return 0;
}