// SPDX-License-Identifier: GPL-2.0-only
/*
 * psci_cpu_on_test - Test that the observable state of a vCPU targeted by the
 * CPU_ON PSCI call matches what the caller requested.
 *
 * Copyright (c) 2021 Google LLC.
 *
 * This is a regression test for a race between KVM servicing the PSCI call and
 * userspace reading the vCPU's registers.
 */

#define _GNU_SOURCE

#include <linux/psci.h>

#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

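/*
 * Arbitrary, easily recognizable values used as the entry point address and
 * context ID for the PSCI calls below; the CPU_ON test checks that they end
 * up in the target vCPU's PC and x0.
 */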
#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
#define CPU_ON_CONTEXT_ID 0xdeadc0deul

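/*
 * Guest-side wrappers that issue the PSCI calls used by this test over the
 * SMCCC HVC conduit and return whatever the hypervisor placed in x0.
 */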
static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
			    uint64_t context_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
		  0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_affinity_info(uint64_t target_affinity,
				   uint64_t lowest_affinity_level)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
		  0, 0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
		  0, 0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_features(uint32_t func_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;
}

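/*
 * Mark the vCPU as powered off (KVM_MP_STATE_STOPPED) so that it can only be
 * brought back online by a PSCI call from another vCPU.
 */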
static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	struct kvm_mp_state mp_state = {
		.mp_state = KVM_MP_STATE_STOPPED,
	};

	vcpu_mp_state_set(vcpu, &mp_state);
}

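/*
 * Create a two-vCPU VM using KVM's preferred target type, with PSCI v0.2 (or
 * later) enabled on both vCPUs. vCPU 0 acts as the caller ("source") and
 * vCPU 1 as the "target" of the PSCI calls; both run the same guest_code.
 */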
static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
			       struct kvm_vcpu **target)
{
	struct kvm_vcpu_init init;
	struct kvm_vm *vm;

	vm = vm_create(2);
	ucall_init(vm, NULL);

	vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);

	*source = aarch64_vcpu_add(vm, 0, &init, guest_code);
	*target = aarch64_vcpu_add(vm, 1, &init, guest_code);

	return vm;
}

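/*
 * Run the vCPU until its next exit to userspace, failing the test if the
 * guest reported an assertion failure.
 */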
static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);
	if (get_ucall(vcpu, &uc) == UCALL_ABORT)
		TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__,
			  uc.args[1]);
}

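/*
 * Verify that the target vCPU came out of reset where CPU_ON asked it to:
 * PC at the entry address and x0 holding the context ID.
 */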
static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
{
	uint64_t obs_pc, obs_x0;

	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);

	TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
		    "unexpected target cpu pc: %lx (expected: %lx)",
		    obs_pc, CPU_ON_ENTRY_ADDR);
	TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
		    "unexpected target context id: %lx (expected: %lx)",
		    obs_x0, CPU_ON_CONTEXT_ID);
}

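/*
 * Runs on the source vCPU: power on the target, then poll AFFINITY_INFO
 * until the target is reported as ON.
 */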
static void guest_test_cpu_on(uint64_t target_cpu)
{
	uint64_t target_state;

	GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));

	do {
		target_state = psci_affinity_info(target_cpu, 0);

		GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
			     (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF));
	} while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON);

	GUEST_DONE();
}

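/*
 * Host side of the CPU_ON test: keep the target powered off, let the source
 * vCPU issue CPU_ON, then read the target's registers from userspace without
 * ever running it, i.e. the scenario that raced with KVM's handling of the
 * call.
 */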
static void host_test_cpu_on(void)
{
	struct kvm_vcpu *source, *target;
	uint64_t target_mpidr;
	struct kvm_vm *vm;
	struct ucall uc;

	vm = setup_vm(guest_test_cpu_on, &source, &target);

	/*
	 * Make sure the target vCPU is powered off before running the test.
	 */
	vcpu_power_off(target);

	vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
	vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
	enter_guest(source);

	if (get_ucall(source, &uc) != UCALL_DONE)
		TEST_FAIL("Unhandled ucall: %lu", uc.cmd);

	assert_vcpu_reset(target);
	kvm_vm_free(vm);
}

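/*
 * Runs on the source vCPU: check that SYSTEM_SUSPEND is discoverable via
 * PSCI_FEATURES, then issue it. The return value only reaches GUEST_SYNC()
 * if the call fails, since a successful SYSTEM_SUSPEND does not return to
 * the caller.
 */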
static void guest_test_system_suspend(void)
{
	uint64_t ret;

	/* assert that SYSTEM_SUSPEND is discoverable */
	GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
	GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND));

	ret = psci_system_suspend(CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID);
	GUEST_SYNC(ret);
}

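/*
 * Host side of the SYSTEM_SUSPEND test: with KVM_CAP_ARM_SYSTEM_SUSPEND
 * enabled, the guest's SYSTEM_SUSPEND call should reach userspace as a
 * KVM_SYSTEM_EVENT_SUSPEND exit instead of being completed inside KVM.
 */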
static void host_test_system_suspend(void)
{
	struct kvm_vcpu *source, *target;
	struct kvm_run *run;
	struct kvm_vm *vm;

	vm = setup_vm(guest_test_system_suspend, &source, &target);
	vm_enable_cap(vm, KVM_CAP_ARM_SYSTEM_SUSPEND, 0);

	vcpu_power_off(target);
	run = source->run;

	enter_guest(source);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
		    "Unhandled exit reason: %u (%s)",
		    run->exit_reason, exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
		    "Unhandled system event: %u (expected: %u)",
		    run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);

	kvm_vm_free(vm);
}

int main(void)
{
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));

	host_test_cpu_on();
	host_test_system_suspend();
	return 0;
}