// SPDX-License-Identifier: GPL-2.0-only
/*
 * psci_test - Tests of KVM's PSCI implementation: that the observable state
 * of a vCPU targeted by the CPU_ON PSCI call matches what the caller
 * requested, and that a guest SYSTEM_SUSPEND request is surfaced to
 * userspace as the expected system event.
 *
 * Copyright (c) 2021 Google LLC.
 *
 * The CPU_ON test is a regression test for a race between KVM servicing the
 * PSCI call and userspace reading the targeted vCPU's registers.
 */

#define _GNU_SOURCE

#include <linux/psci.h>

#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
#define CPU_ON_CONTEXT_ID 0xdeadc0deul

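/* Guest helper: issue PSCI CPU_ON via HVC and return the SMCCC result. */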
static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
			    uint64_t context_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
		  0, 0, 0, 0, &res);

	return res.a0;
}

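/* Guest helper: query a target's power state via PSCI AFFINITY_INFO. */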
static uint64_t psci_affinity_info(uint64_t target_affinity,
				   uint64_t lowest_affinity_level)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
		  0, 0, 0, 0, 0, &res);

	return res.a0;
}

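/* Guest helper: request a system-wide suspend via PSCI SYSTEM_SUSPEND. */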
static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
		  0, 0, 0, 0, 0, &res);

	return res.a0;
}

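/* Guest helper: check whether a PSCI function is supported via PSCI_FEATURES. */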
static uint64_t psci_features(uint32_t func_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;
}

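/* Power off a vCPU from userspace by setting its MP state to STOPPED. */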
static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	struct kvm_mp_state mp_state = {
		.mp_state = KVM_MP_STATE_STOPPED,
	};

	vcpu_mp_state_set(vcpu, &mp_state);
}

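/*
 * Create a two-vCPU VM with PSCI v0.2 enabled: vCPU 0 runs the test code
 * (the caller) and vCPU 1 is the target of the PSCI calls.
 */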
static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
			       struct kvm_vcpu **target)
{
	struct kvm_vcpu_init init;
	struct kvm_vm *vm;

	vm = vm_create(2);
	ucall_init(vm, NULL);

	vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);

	*source = aarch64_vcpu_add(vm, 0, &init, guest_code);
	*target = aarch64_vcpu_add(vm, 1, &init, guest_code);

	return vm;
}

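/* Run the vCPU once and report any assertion raised by the guest. */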
static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);
	if (get_ucall(vcpu, &uc) == UCALL_ABORT)
		REPORT_GUEST_ASSERT(uc);
}

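/*
 * Assert that the target vCPU was reset to the entry point and context ID
 * requested by the CPU_ON call.
 */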
static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
{
	uint64_t obs_pc, obs_x0;

	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);

	TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
		    "unexpected target cpu pc: %lx (expected: %lx)",
		    obs_pc, CPU_ON_ENTRY_ADDR);
	TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
		    "unexpected target context id: %lx (expected: %lx)",
		    obs_x0, CPU_ON_CONTEXT_ID);
}

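/*
 * Guest code for the source vCPU: power on the target, then spin until
 * AFFINITY_INFO reports it as ON.
 */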
static void guest_test_cpu_on(uint64_t target_cpu)
{
	uint64_t target_state;

	GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));

	do {
		target_state = psci_affinity_info(target_cpu, 0);

		GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
			     (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF));
	} while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON);

	GUEST_DONE();
}

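/*
 * Host side of the CPU_ON test: power off the target, have the source vCPU
 * power it back on, then check the target's registers from userspace.
 */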
static void host_test_cpu_on(void)
{
	struct kvm_vcpu *source, *target;
	uint64_t target_mpidr;
	struct kvm_vm *vm;
	struct ucall uc;

	vm = setup_vm(guest_test_cpu_on, &source, &target);

	/*
	 * make sure the target is already off when executing the test.
	 */
	vcpu_power_off(target);

	vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
	vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
	enter_guest(source);

	if (get_ucall(source, &uc) != UCALL_DONE)
		TEST_FAIL("Unhandled ucall: %lu", uc.cmd);

	assert_vcpu_reset(target);
	kvm_vm_free(vm);
}

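/*
 * Guest code: check that SYSTEM_SUSPEND is discoverable, then request it and
 * report the return value to the host.
 */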
static void guest_test_system_suspend(void)
{
	uint64_t ret;

	/* assert that SYSTEM_SUSPEND is discoverable */
	GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
	GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND));

	ret = psci_system_suspend(CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID);
	GUEST_SYNC(ret);
}

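/*
 * Host side of the SYSTEM_SUSPEND test: the guest's suspend request should
 * exit to userspace as a KVM_SYSTEM_EVENT_SUSPEND system event.
 */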
static void host_test_system_suspend(void)
{
	struct kvm_vcpu *source, *target;
	struct kvm_run *run;
	struct kvm_vm *vm;

	vm = setup_vm(guest_test_system_suspend, &source, &target);
	vm_enable_cap(vm, KVM_CAP_ARM_SYSTEM_SUSPEND, 0);

	vcpu_power_off(target);
	run = source->run;

	enter_guest(source);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
		    "Unhandled exit reason: %u (%s)",
		    run->exit_reason, exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
		    "Unhandled system event: %u (expected: %u)",
		    run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);

	kvm_vm_free(vm);
}

int main(void)
{
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));

	host_test_cpu_on();
	host_test_system_suspend();
	return 0;
}