xref: /openbmc/linux/arch/riscv/kvm/vcpu_sbi.c (revision 0f9b4c3ca5fdf3e177266ef994071b1a03f07318)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

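/*
 * When an SBI extension is compiled out, provide a placeholder with an
 * impossible extension ID range and no handler, so that the sbi_ext[]
 * table below can be built and walked unconditionally.
 */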
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

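/*
 * Maps a user-visible extension index (the KVM_RISCV_SBI_EXT_* IDs exposed
 * through the ONE_REG interface) to its in-kernel implementation.
 */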
struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};

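/*
 * Forward an unhandled SBI call to userspace as a KVM_EXIT_RISCV_SBI exit.
 * Per the SBI calling convention, a7 holds the extension ID, a6 the function
 * ID, and a0-a5 the arguments; ret[0]/ret[1] (a0/a1) default to
 * SBI_ERR_NOT_SUPPORTED until userspace fills them in.
 */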
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
	run->riscv_sbi.ret[1] = 0;
}

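/*
 * Stop all VCPUs of the VM and report a system-level event (e.g. a shutdown
 * or reset requested via SBI SRST) to userspace as a KVM_EXIT_SYSTEM_EVENT.
 */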
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

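/*
 * Complete an SBI call that was forwarded to userspace: copy the return
 * values userspace placed in the run struct back into the guest's a0/a1
 * and skip past the trapping ecall instruction. Guarded so the update is
 * applied only once per forwarded call.
 */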
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

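/*
 * Enable or disable a single SBI extension on behalf of userspace. Writing 0
 * marks the extension unavailable; writing 1 leaves the status untouched so
 * availability can still be decided lazily by probe() (see the comment in
 * the function body).
 */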
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -ENOENT;

	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	/*
	 * We can't set the extension status to available here, since it may
	 * have a probe() function which needs to confirm availability first,
	 * but it may be too early to call that here. We can set the status to
	 * unavailable, though.
	 */
	if (!reg_val)
		scontext->ext_status[sext->ext_idx] =
			KVM_RISCV_SBI_EXT_UNAVAILABLE;

	return 0;
}

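/*
 * Report whether a single SBI extension is enabled. Returns 1 for anything
 * that has not been explicitly disabled, including extensions whose probe()
 * has not run yet.
 */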
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	/*
	 * If the extension status is still uninitialized, then we should probe
	 * to determine if it's available, but it may be too early to do that
	 * here. The best we can do is report that the extension has not been
	 * disabled, i.e. we return 1 when the extension is available and also
	 * when it only may be available.
	 */
	*reg_val = scontext->ext_status[sext->ext_idx] !=
				KVM_RISCV_SBI_EXT_UNAVAILABLE;

	return 0;
}

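/*
 * The MULTI registers pack extension IDs into bitmaps, one bit per
 * extension: register reg_num covers extension IDs
 * [reg_num * BITS_PER_LONG, (reg_num + 1) * BITS_PER_LONG - 1].
 */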
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}

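/*
 * ONE_REG write handler for the KVM_REG_RISCV_SBI_EXT register space. A VMM
 * composes the register id from the arch, size, type, and subtype fields,
 * roughly as below (a sketch only; the authoritative encoding lives in the
 * uapi headers, and the size field is KVM_REG_SIZE_U32 on rv32):
 *
 *   reg.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *            KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *            KVM_RISCV_SBI_EXT_PMU;
 *
 * Writes are only accepted before the VCPU has run (-EBUSY afterwards).
 */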
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

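/*
 * ONE_REG read side. For MULTI_DIS the enabled-extension bitmap is inverted,
 * so userspace reads back the set of disabled extensions.
 */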
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

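/*
 * Look up the extension implementing SBI extension ID @extid and resolve its
 * availability lazily: entries with ext_idx >= KVM_RISCV_SBI_EXT_MAX (the
 * base extension) are always available; otherwise a still-uninitialized
 * status is settled by calling probe(), and the result is cached in
 * ext_status so the probe runs at most once per VCPU.
 */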
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_AVAILABLE)
				return ext;
			if (scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_UNAVAILABLE)
				return NULL;
			if (ext->probe && !ext->probe(vcpu)) {
				scontext->ext_status[entry->ext_idx] =
					KVM_RISCV_SBI_EXT_UNAVAILABLE;
				return NULL;
			}

			scontext->ext_status[entry->ext_idx] =
				KVM_RISCV_SBI_EXT_AVAILABLE;
			return ext;
		}
	}

	return NULL;
}

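/*
 * Top-level SBI ecall dispatcher. The return value follows the usual KVM
 * convention: < 0 propagates a Linux error to userspace, 0 exits to
 * userspace (e.g. for a forwarded call), and 1 resumes the guest.
 */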
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension handler returns a Linux error code, exit the
	 * ioctl loop and forward the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases i.e. trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}