// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

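/*
 * When SBI v0.1 or the SBI PMU extension is not compiled in, provide
 * placeholder entries with an invalid extension ID range and no handler,
 * so that the sbi_ext[] table below can be built unconditionally.
 */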
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

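/*
 * Mapping between the user-visible KVM_RISCV_SBI_EXT_* IDs and the in-kernel
 * SBI extension implementations. Entries that must never be disabled from
 * userspace (e.g. the base extension) use KVM_RISCV_SBI_EXT_MAX as their
 * index instead of a real ID.
 */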
struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};

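/*
 * Forward an SBI call that the kernel does not handle to userspace by
 * filling in the KVM_EXIT_RISCV_SBI exit reason. The returned error defaults
 * to SBI_ERR_NOT_SUPPORTED until userspace overrides run->riscv_sbi.ret.
 */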
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
	run->riscv_sbi.ret[1] = 0;
}

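/*
 * Handle an SBI system-reset style request: mark every vCPU as powered off,
 * request that they all go to sleep, and report a KVM_EXIT_SYSTEM_EVENT of
 * the given type and reason to userspace.
 */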
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

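/*
 * Complete an SBI call that was forwarded to userspace: copy the values in
 * run->riscv_sbi.ret back into a0/a1 and step past the guest's ecall
 * instruction. Guarded so the return is applied only once per forward.
 */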
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

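/*
 * Enable or disable a single SBI extension for this vCPU. Disabling is
 * recorded immediately; enabling is deferred because the extension's
 * probe() callback may not be safe to call this early.
 */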
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -ENOENT;

	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	/*
	 * We can't set the extension status to available here, since it may
	 * have a probe() function which needs to confirm availability first,
	 * but it may be too early to call that here. We can set the status to
	 * unavailable, though.
	 */
	if (!reg_val)
		scontext->ext_status[sext->ext_idx] =
			KVM_RISCV_SBI_EXT_UNAVAILABLE;

	return 0;
}

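/*
 * Report whether a single SBI extension is enabled. An extension that has
 * not been explicitly disabled is reported as enabled, even if it has not
 * been probed yet.
 */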
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	/*
	 * If the extension status is still uninitialized, then we should probe
	 * to determine if it's available, but it may be too early to do that
	 * here. The best we can do is report that the extension has not been
	 * disabled, i.e. we return 1 when the extension is available and also
	 * when it only may be available.
	 */
	*reg_val = scontext->ext_status[sext->ext_idx] !=
				KVM_RISCV_SBI_EXT_UNAVAILABLE;

	return 0;
}

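/*
 * Apply one word of the MULTI_EN/MULTI_DIS bitmap: bit i of register
 * @reg_num corresponds to extension ID (reg_num * BITS_PER_LONG + i), and
 * each set bit enables or disables that extension depending on @enable.
 */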
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

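/*
 * Build one word of the extension bitmap: bit i of register @reg_num is set
 * when extension (reg_num * BITS_PER_LONG + i) is not disabled.
 */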
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}

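/*
 * KVM_SET_ONE_REG handler for the SBI extension registers. Writes are only
 * accepted before the vCPU has run for the first time (-EBUSY otherwise).
 * As a rough, illustrative sketch only (the exact register-ID composition is
 * defined by the KVM UAPI headers, and vcpu_fd is a hypothetical userspace
 * file descriptor), a VMM on RV64 might disable the TIME extension with:
 *
 *	unsigned long val = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *			KVM_RISCV_SBI_EXT_TIME,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * The register size must match sizeof(unsigned long) on the host.
 */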
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

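/*
 * KVM_GET_ONE_REG handler for the SBI extension registers. For the
 * MULTI_DIS subtype the enabled-extension bitmap is inverted, so the value
 * read back is the set of disabled extensions.
 */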
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

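/*
 * Look up the SBI extension that owns @extid and lazily resolve its
 * availability: extensions that cannot be disabled are always returned,
 * explicitly disabled ones return NULL, and otherwise the extension's
 * probe() callback (if any) decides and the result is cached in
 * scontext->ext_status.
 */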
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_AVAILABLE)
				return ext;
			if (scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_UNAVAILABLE)
				return NULL;
			if (ext->probe && !ext->probe(vcpu)) {
				scontext->ext_status[entry->ext_idx] =
					KVM_RISCV_SBI_EXT_UNAVAILABLE;
				return NULL;
			}

			scontext->ext_status[entry->ext_idx] =
				KVM_RISCV_SBI_EXT_AVAILABLE;
			return ext;
		}
	}

	return NULL;
}

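/*
 * Top-level SBI ecall dispatcher, invoked on a guest ecall trap. Returns 1
 * to keep running the guest, 0 to exit to userspace, or a negative error
 * code. Unless the call was forwarded to userspace or redirected back to
 * the guest as a trap, sepc is advanced past the 4-byte ecall instruction
 * and a0/a1 carry the SBI error and output values back to the guest.
 */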
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension handler returns a Linux error code, exit the
	 * ioctl loop and forward the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}