// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

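/*
 * SBI TIME extension: the only function handled here is SET_TIMER, which
 * programs the next timer event for the calling VCPU; any other function
 * ID is rejected.
 */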
static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	u64 next_cycle;

	if (cp->a6 != SBI_EXT_TIME_SET_TIMER)
		return -EINVAL;

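	/*
	 * On RV32 the 64-bit timer value is split across two registers:
	 * a0 carries the low 32 bits and a1 the high 32 bits.
	 */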
#if __riscv_xlen == 32
	next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
	next_cycle = (u64)cp->a0;
#endif
	kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
	.extid_start = SBI_EXT_TIME,
	.extid_end = SBI_EXT_TIME,
	.handler = kvm_sbi_ext_time_handler,
};

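/*
 * SBI IPI extension: SEND_IPI injects a VS-level software interrupt into
 * every VCPU selected by the hart mask in a0 and the mask base in a1.
 */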
static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   unsigned long *out_val,
				   struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	unsigned long i;
	struct kvm_vcpu *tmp;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;

	if (cp->a6 != SBI_EXT_IPI_SEND_IPI)
		return -EINVAL;

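	/*
	 * A mask base of -1UL selects all VCPUs; otherwise only VCPUs whose
	 * ID falls within the mask window starting at hbase are targeted.
	 */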
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (hbase != -1UL) {
			if (tmp->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
				continue;
		}
		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
		if (ret < 0)
			break;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
	.extid_start = SBI_EXT_IPI,
	.extid_end = SBI_EXT_IPI,
	.handler = kvm_sbi_ext_ipi_handler,
};

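/*
 * SBI RFENCE extension: remote fences are serviced through the KVM guest
 * TLB management helpers. The HFENCE variants are accepted but act as
 * no-ops until nested virtualization is supported.
 */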
static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      unsigned long *out_val,
				      struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
		else
			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
						  cp->a2, cp->a3, PAGE_SHIFT);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
						       hbase, hmask, cp->a4);
		else
			kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
						       hbase, hmask,
						       cp->a2, cp->a3,
						       PAGE_SHIFT, cp->a4);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		/*
		 * Until nested virtualization is implemented, the
		 * SBI HFENCE calls should be treated as NOPs
		 */
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
	.extid_start = SBI_EXT_RFENCE,
	.extid_end = SBI_EXT_RFENCE,
	.handler = kvm_sbi_ext_rfence_handler,
};

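/*
 * SBI SRST extension: reset requests are forwarded to user space as KVM
 * system events (shutdown for SHUTDOWN, reset for COLD/WARM_REBOOT);
 * unsupported reset types are rejected.
 */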
static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
				    struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	u32 reason = cp->a1;
	u32 type = cp->a0;
	int ret = 0;

	switch (funcid) {
	case SBI_EXT_SRST_RESET:
		switch (type) {
		case SBI_SRST_RESET_TYPE_SHUTDOWN:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_SHUTDOWN,
						reason);
			*exit = true;
			break;
		case SBI_SRST_RESET_TYPE_COLD_REBOOT:
		case SBI_SRST_RESET_TYPE_WARM_REBOOT:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_RESET,
						reason);
			*exit = true;
			break;
		default:
			ret = -EOPNOTSUPP;
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
	.extid_start = SBI_EXT_SRST,
	.extid_end = SBI_EXT_SRST,
	.handler = kvm_sbi_ext_srst_handler,
};