xref: /openbmc/linux/arch/riscv/kvm/vcpu_sbi_v01.c (revision 0be3ff0c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

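/*
 * Handler for the legacy SBI v0.1 calls a guest makes via ECALL.
 * Console I/O and shutdown are punted to user space; timers, IPIs and
 * remote fences are completed in the kernel.
 */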
static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   unsigned long *out_val,
				   struct kvm_cpu_trap *utrap,
				   bool *exit)
{
	ulong hmask;
	int i, ret = 0;
	u64 next_cycle;
	struct kvm_vcpu *rvcpu;
	struct cpumask cm;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	switch (cp->a7) {
	case SBI_EXT_0_1_CONSOLE_GETCHAR:
	case SBI_EXT_0_1_CONSOLE_PUTCHAR:
		/*
		 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
		 * handled in the kernel, so forward them to user space
		 * (the VMM sees a KVM_EXIT_RISCV_SBI exit).
		 */
		kvm_riscv_vcpu_sbi_forward(vcpu, run);
		*exit = true;
		break;
	case SBI_EXT_0_1_SET_TIMER:
		/*
		 * On RV32 the 64-bit timer deadline arrives as an
		 * a1 (high) / a0 (low) pair; on RV64 it fits in a0.
		 */
#if __riscv_xlen == 32
		next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
		next_cycle = (u64)cp->a0;
#endif
		ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
		break;
	case SBI_EXT_0_1_CLEAR_IPI:
		/* Clear the pending VS-mode software interrupt. */
		ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
		break;
	case SBI_EXT_0_1_SEND_IPI:
		/*
		 * a0 points at the guest's hart mask; a NULL pointer means
		 * "all online vCPUs". The unprivileged read of guest memory
		 * can fault, in which case utrap carries the trap details
		 * back for redelivery to the guest.
		 */
		if (cp->a0)
			hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
							   utrap);
		else
			hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
		if (utrap->scause)
			break;

		/* Raise a VS-mode software interrupt on each targeted vCPU. */
		for_each_set_bit(i, &hmask, BITS_PER_LONG) {
			rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
			ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
			if (ret < 0)
				break;
		}
		break;
	case SBI_EXT_0_1_SHUTDOWN:
		kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_SHUTDOWN, 0);
		*exit = true;
		break;
	case SBI_EXT_0_1_REMOTE_FENCE_I:
	case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
	case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
		/* Read the guest hart mask, as for SEND_IPI above. */
		if (cp->a0)
			hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
							   utrap);
		else
			hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
		if (utrap->scause)
			break;

		/*
		 * Translate the guest hart mask into the host cpumask of
		 * the physical CPUs those vCPUs are currently loaded on.
		 */
		cpumask_clear(&cm);
		for_each_set_bit(i, &hmask, BITS_PER_LONG) {
			rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
			if (rvcpu->cpu < 0)
				continue;
			cpumask_set_cpu(rvcpu->cpu, &cm);
		}
		/* Guest SFENCE.VMA requests become hypervisor HFENCE.VVMA. */
		if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
			ret = sbi_remote_fence_i(&cm);
		else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
			ret = sbi_remote_hfence_vvma(&cm, cp->a1, cp->a2);
		else
			ret = sbi_remote_hfence_vvma_asid(&cm, cp->a1,
							  cp->a2, cp->a3);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
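
/*
 * Illustration only, not part of this file: a minimal sketch of the
 * guest side of one of these calls, assuming a bare-metal guest and
 * the v0.1 calling convention (function number in a7, first argument
 * in a0, result back in a0). The function name below is made up for
 * the example; the ECALL is what traps into the handler above.
 */
#if 0	/* guest-side sketch */
static long guest_sbi_v01_call(unsigned long fid, unsigned long arg0)
{
	register unsigned long a0 asm("a0") = arg0;
	register unsigned long a7 asm("a7") = fid;

	asm volatile("ecall" : "+r"(a0) : "r"(a7) : "memory");
	return a0;
}

/* e.g. legacy CONSOLE_PUTCHAR is function number 0x1: */
/* guest_sbi_v01_call(0x1, 'A'); */
#endif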

/* Legacy v0.1 calls use a7 values 0x0 (SET_TIMER) through 0x8 (SHUTDOWN). */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = SBI_EXT_0_1_SET_TIMER,
	.extid_end = SBI_EXT_0_1_SHUTDOWN,
	.handler = kvm_sbi_ext_v01_handler,
};
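
/*
 * Illustration only: a sketch of how the common SBI code would pick
 * this extension (the actual dispatch lives in kvm_vcpu_sbi.c). A
 * handler is selected when the guest's a7 value falls inside the
 * extension's [extid_start, extid_end] range; the helper name here
 * is made up.
 */
#if 0	/* dispatch sketch */
static bool sbi_ext_claims(const struct kvm_vcpu_sbi_extension *ext,
			   unsigned long extid)
{
	return ext->extid_start <= extid && extid <= ext->extid_end;
}
#endif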