/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/prctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
#include "system/system.h"
#include "system/kvm.h"
#include "system/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "accel/accel-cpu-target.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "hw/intc/riscv_imsic.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/misc.h"
#include "system/runstate.h"
#include "hw/riscv/numa.h"

#define PR_RISCV_V_SET_CONTROL    69
#define PR_RISCV_V_VSTATE_CTRL_ON 2

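/*
 * qemu_irq handler used when the irqchip lives in the kernel: it
 * forwards an APLIC input wire level change to KVM via kvm_set_irq().
 */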
void riscv_kvm_aplic_request(void *opaque, int irq, int level)
{
    kvm_set_irq(kvm_state, irq, !!level);
}

static bool cap_has_mp_state;

#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \
                                         type | idx)

#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \
                                         type | idx)

#if defined(TARGET_RISCV64)
#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx)
#else
#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx)
#endif

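/*
 * KVM encodes a register's size in its ID as log2(size in bytes),
 * stored at KVM_REG_SIZE_SHIFT. __builtin_ctz() of the byte size
 * computes that log2 for the power-of-2 sizes used here.
 */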
static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
{
    uint64_t size_ctz = __builtin_ctz(size_b);

    return id | (size_ctz << KVM_REG_SIZE_SHIFT);
}

static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
                                        uint64_t idx)
{
    uint64_t id;
    size_t size_b;

    g_assert(idx < 32);

    id = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(idx);
    size_b = cpu->cfg.vlenb;

    return kvm_encode_reg_size_id(id, size_b);
}

#define RISCV_CORE_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \
                           KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \
                           KVM_REG_RISCV_CSR_REG(name))

#define RISCV_CONFIG_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \
                           KVM_REG_RISCV_CONFIG_REG(name))

#define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \
                                                   KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx)

#define RISCV_VECTOR_CSR_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \
                           KVM_REG_RISCV_VECTOR_CSR_REG(name))

#define KVM_RISCV_GET_TIMER(cs, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

#define KVM_RISCV_SET_TIMER(cs, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

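/*
 * Describes one host-probed CPU option exposed as a QOM property:
 * 'offset' locates the backing field (in CPURISCVState or in
 * RISCVCPUConfig, depending on the user), 'kvm_reg_id' the matching
 * KVM register, 'user_set' records an explicit user override and
 * 'supported' whether the host KVM knows the register.
 */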
typedef struct KVMCPUConfig {
    const char *name;
    const char *description;
    target_ulong offset;
    uint64_t kvm_reg_id;
    uint32_t prop_size;
    bool user_set;
    bool supported;
} KVMCPUConfig;

#define KVM_MISA_CFG(_bit, _reg_id) \
    {.offset = _bit, .kvm_reg_id = _reg_id}

/* KVM ISA extensions */
static KVMCPUConfig kvm_misa_ext_cfgs[] = {
    KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
    KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
    KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
    KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
    KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
    KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
    KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
    KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V),
};

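/*
 * QOM accessors for the single-letter (MISA) extension properties.
 * The getter reports the host's misa_ext_mask bit. The setter can
 * only disable a bit the host has: enabling a bit the host lacks
 * is refused, since KVM cannot provide it.
 */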
static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value = env->misa_ext_mask & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value, host_bit;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_bit = env->misa_ext_mask & misa_bit;

    if (value == host_bit) {
        return;
    }

    if (!value) {
        misa_ext_cfg->user_set = true;
        return;
    }

    /*
     * Forbid users from enabling extensions that aren't
     * available in the hart.
     */
    error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
               "enabled in the host", misa_ext_cfg->name);
}

static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
{
    CPURISCVState *env = &cpu->env;
    uint64_t id, reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
        KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
        target_ulong misa_bit = misa_cfg->offset;

        if (!misa_cfg->user_set) {
            continue;
        }

        /* If we're here we're going to disable the MISA bit */
        reg = 0;
        id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
                                    misa_cfg->kvm_reg_id);
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret != 0) {
            /*
             * We're not checking for -EINVAL because if the bit is about
             * to be disabled, it means that it was already enabled by
             * KVM. We determined that by fetching the 'isa' register
             * during init() time. Any error at this point is worth
             * aborting.
             */
            error_report("Unable to set KVM reg %s, error %d",
                         misa_cfg->name, ret);
            exit(EXIT_FAILURE);
        }
        env->misa_ext &= ~misa_bit;
    }
}

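/*
 * Supervisor CSRs that KVM exposes as ONE_REG registers. Note that
 * sstatus, sie and sip are stored in the mstatus, mie and mip fields
 * of CPURISCVState, which is why those offsets differ from the names.
 */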
#define KVM_CSR_CFG(_name, _env_prop, reg_id) \
    {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \
     .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \
     .kvm_reg_id = reg_id}

static KVMCPUConfig kvm_csr_cfgs[] = {
    KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)),
    KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)),
    KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)),
    KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)),
    KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)),
    KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)),
    KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)),
    KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)),
    KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)),
    KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)),
    KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)),
};

static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
    return (void *)&cpu->env + csr_cfg->offset;
}

static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
    uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
    return *val32;
}

static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
    uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
    return *val64;
}

static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
                                uint32_t val)
{
    uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
    *val32 = val;
}

static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
                                uint64_t val)
{
    uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
    *val64 = val;
}

#define KVM_EXT_CFG(_name, _prop, _reg_id) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .kvm_reg_id = _reg_id}

static KVMCPUConfig kvm_multi_ext_cfgs[] = {
    KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
    KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
    KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
    KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
    KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
    KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
    KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
    KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
    KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
    KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
    KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
    KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
    KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
    KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
    KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
    KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
    KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
    KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
    KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
    KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
    KVM_EXT_CFG("zbc", ext_zbc, KVM_RISCV_ISA_EXT_ZBC),
    KVM_EXT_CFG("zbkb", ext_zbkb, KVM_RISCV_ISA_EXT_ZBKB),
    KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
    KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
    KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
    KVM_EXT_CFG("zca", ext_zca, KVM_RISCV_ISA_EXT_ZCA),
    KVM_EXT_CFG("zcb", ext_zcb, KVM_RISCV_ISA_EXT_ZCB),
    KVM_EXT_CFG("zcd", ext_zcd, KVM_RISCV_ISA_EXT_ZCD),
    KVM_EXT_CFG("zcf", ext_zcf, KVM_RISCV_ISA_EXT_ZCF),
    KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
    KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
    KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
    KVM_EXT_CFG("zkr", ext_zkr, KVM_RISCV_ISA_EXT_ZKR),
    KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED),
    KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH),
    KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT),
    KVM_EXT_CFG("ztso", ext_ztso, KVM_RISCV_ISA_EXT_ZTSO),
    KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB),
    KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC),
    KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH),
    KVM_EXT_CFG("zvfhmin", ext_zvfhmin, KVM_RISCV_ISA_EXT_ZVFHMIN),
    KVM_EXT_CFG("zvkb", ext_zvkb, KVM_RISCV_ISA_EXT_ZVKB),
    KVM_EXT_CFG("zvkg", ext_zvkg, KVM_RISCV_ISA_EXT_ZVKG),
    KVM_EXT_CFG("zvkned", ext_zvkned, KVM_RISCV_ISA_EXT_ZVKNED),
    KVM_EXT_CFG("zvknha", ext_zvknha, KVM_RISCV_ISA_EXT_ZVKNHA),
    KVM_EXT_CFG("zvknhb", ext_zvknhb, KVM_RISCV_ISA_EXT_ZVKNHB),
    KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
    KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
    KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
    KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
    KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
    KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
    KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
    KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
    KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
    KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
    KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
    KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
    KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
    KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
    KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
};

static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
{
    return (void *)&cpu->cfg + kvmcfg->offset;
}

static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext,
                            uint32_t val)
{
    bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);

    *ext_enabled = val;
}

static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
                                KVMCPUConfig *multi_ext)
{
    bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);

    return *ext_enabled;
}

static void kvm_cpu_get_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
{
    KVMCPUConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value = kvm_cpu_cfg_get(cpu, multi_ext_cfg);

    visit_type_bool(v, name, &value, errp);
}

static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
{
    KVMCPUConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value, host_val;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg);

    /*
     * Ignore if the user is setting the same value
     * as the host.
     */
    if (value == host_val) {
        return;
    }

    if (!multi_ext_cfg->supported) {
        /*
         * Error out if the user is trying to enable an
         * extension that KVM doesn't support. Ignore
         * the option otherwise.
         */
        if (value) {
            error_setg(errp, "KVM does not support enabling extension %s",
                       multi_ext_cfg->name);
        }

        return;
    }

    multi_ext_cfg->user_set = true;
    kvm_cpu_cfg_set(cpu, multi_ext_cfg, value);
}

static KVMCPUConfig kvm_cbom_blocksize = {
    .name = "cbom_blocksize",
    .offset = CPU_CFG_OFFSET(cbom_blocksize),
    .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
};

static KVMCPUConfig kvm_cboz_blocksize = {
    .name = "cboz_blocksize",
    .offset = CPU_CFG_OFFSET(cboz_blocksize),
    .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
};

static KVMCPUConfig kvm_v_vlenb = {
    .name = "vlenb",
    .offset = CPU_CFG_OFFSET(vlenb),
    .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR |
                  KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)
};

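/*
 * Pseudo register used to toggle the SBI Debug Console (DBCN)
 * extension for the guest; the corresponding exits are handled in
 * kvm_riscv_handle_sbi_dbcn() below.
 */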
static KVMCPUConfig kvm_sbi_dbcn = {
    .name = "sbi_dbcn",
    .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                  KVM_REG_RISCV_SBI_EXT | KVM_RISCV_SBI_EXT_DBCN
};

static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
{
    uint64_t id, reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];

        if (!multi_ext_cfg->user_set) {
            continue;
        }

        id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
                                    multi_ext_cfg->kvm_reg_id);
        reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret != 0) {
            if (!reg && ret == -EINVAL) {
                warn_report("KVM cannot disable extension %s",
                            multi_ext_cfg->name);
            } else {
                error_report("Unable to enable extension %s in KVM, error %d",
                             multi_ext_cfg->name, ret);
                exit(EXIT_FAILURE);
            }
        }
    }
}

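/*
 * Stub accessors for properties that KVM cannot honour: the getter
 * always reports 'false' and the setter errors out on any attempt
 * to enable the option.
 */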
static void cpu_get_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    bool value = false;

    visit_type_bool(v, name, &value, errp);
}

static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "'%s' is not available with KVM",
                   propname);
    }
}

static void riscv_cpu_add_kvm_unavail_prop(Object *obj, const char *prop_name)
{
    /* Check if KVM created the property already */
    if (object_property_find(obj, prop_name)) {
        return;
    }

    /*
     * Set the default to disabled for every extension
     * unknown to KVM and error out if the user attempts
     * to enable any of them.
     */
    object_property_add(obj, prop_name, "bool",
                        cpu_get_cfg_unavailable,
                        cpu_set_cfg_unavailable,
                        NULL, (void *)prop_name);
}

static void riscv_cpu_add_kvm_unavail_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        riscv_cpu_add_kvm_unavail_prop(obj, prop->name);
    }
}

static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
{
    int i;

    riscv_add_satp_mode_properties(cpu_obj);

    for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
        KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
        int bit = misa_cfg->offset;

        misa_cfg->name = riscv_get_misa_ext_name(bit);
        misa_cfg->description = riscv_get_misa_ext_description(bit);

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            kvm_cpu_get_misa_ext_cfg,
                            kvm_cpu_set_misa_ext_cfg,
                            NULL, misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        const char *ext_name = riscv_get_misa_ext_name(misa_bits[i]);
        riscv_cpu_add_kvm_unavail_prop(cpu_obj, ext_name);
    }

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];

        object_property_add(cpu_obj, multi_cfg->name, "bool",
                            kvm_cpu_get_multi_ext_cfg,
                            kvm_cpu_set_multi_ext_cfg,
                            NULL, multi_cfg);
    }

    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions);
    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts);

    /* We don't have the needed KVM support for profiles */
    for (i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name);
    }
}

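/*
 * Core register sync: the pc plus GPRs x1..x31. x0 is hardwired
 * to zero and has no KVM register, hence the loops start at 1.
 */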
static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}

static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint64_t reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
        KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];

        if (!csr_cfg->supported) {
            continue;
        }

        ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
        if (ret) {
            return ret;
        }

        if (csr_cfg->prop_size == sizeof(uint32_t)) {
            kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg);
        } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
            kvm_cpu_csr_set_u64(cpu, csr_cfg, reg);
        } else {
            g_assert_not_reached();
        }
    }

    return 0;
}

static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint64_t reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
        KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];

        if (!csr_cfg->supported) {
            continue;
        }

        if (csr_cfg->prop_size == sizeof(uint32_t)) {
            reg = kvm_cpu_csr_get_u32(cpu, csr_cfg);
        } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
            reg = kvm_cpu_csr_get_u64(cpu, csr_cfg);
        } else {
            g_assert_not_reached();
        }

        ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

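/* Reset values for the KVM-synced CSRs. The vCPU restarts in S-mode. */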
static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
{
    env->mstatus = 0;
    env->mie = 0;
    env->stvec = 0;
    env->sscratch = 0;
    env->sepc = 0;
    env->scause = 0;
    env->stval = 0;
    env->mip = 0;
    env->satp = 0;
    env->scounteren = 0;
    env->senvcfg = 0;
    env->priv = PRV_S;
}

static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}

static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}

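/*
 * The guest timer state is read back once when the VM stops and
 * written again when it resumes (see kvm_riscv_vm_state_change());
 * kvm_timer_dirty guards against repeated saves while stopped.
 */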
static void kvm_riscv_get_regs_timer(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time);
    KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare);
    KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state);
    KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency);

    env->kvm_timer_dirty = true;
}

static void kvm_riscv_put_regs_timer(CPUState *cs)
{
    uint64_t reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (!env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time);
    KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare);

    /*
     * Writing RISCV_TIMER_REG(state) will return an error from KVM
     * when env->kvm_timer_state == 0. This is better fixed in KVM,
     * but until then simply skip the write in QEMU.
     * TODO: if KVM changes, adapt here.
     */
    if (env->kvm_timer_state) {
        KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state);
    }

    /*
     * For now, migration does not work between hosts with different
     * timer frequencies, so check during migration that the destination
     * frequency matches the source's.
     */
    if (migration_is_running()) {
        KVM_RISCV_GET_TIMER(cs, frequency, reg);
        if (reg != env->kvm_timer_frequency) {
            error_report("Destination host's timer frequency != "
                         "source host's");
        }
    }

    env->kvm_timer_dirty = false;
}

uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
{
    uint64_t reg;

    KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);

    return reg;
}

static int kvm_riscv_get_regs_vector(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    target_ulong reg;
    uint64_t vreg_id;
    int vreg_idx, ret = 0;

    if (!riscv_has_ext(env, RVV)) {
        return 0;
    }

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
    if (ret) {
        return ret;
    }
    env->vstart = reg;

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
    if (ret) {
        return ret;
    }
    env->vl = reg;

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
    if (ret) {
        return ret;
    }
    env->vtype = reg;

    if (kvm_v_vlenb.supported) {
        ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
        if (ret) {
            return ret;
        }
        cpu->cfg.vlenb = reg;

        for (int i = 0; i < 32; i++) {
            /*
             * vreg[] is statically allocated using RV_VLEN_MAX.
             * Use it instead of vlenb to calculate vreg_idx for
             * simplicity.
             */
            vreg_idx = i * RV_VLEN_MAX / 64;
            vreg_id = kvm_riscv_vector_reg_id(cpu, i);

            ret = kvm_get_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
            if (ret) {
                return ret;
            }
        }
    }

    return 0;
}

static int kvm_riscv_put_regs_vector(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    target_ulong reg;
    uint64_t vreg_id;
    int vreg_idx, ret = 0;

    if (!riscv_has_ext(env, RVV)) {
        return 0;
    }

    reg = env->vstart;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
    if (ret) {
        return ret;
    }

    reg = env->vl;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
    if (ret) {
        return ret;
    }

    reg = env->vtype;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
    if (ret) {
        return ret;
    }

    if (kvm_v_vlenb.supported) {
        reg = cpu->cfg.vlenb;
        ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
        if (ret) {
            return ret;
        }

        for (int i = 0; i < 32; i++) {
            /*
             * vreg[] is statically allocated using RV_VLEN_MAX.
             * Use it instead of vlenb to calculate vreg_idx for
             * simplicity.
             */
            vreg_idx = i * RV_VLEN_MAX / 64;
            vreg_id = kvm_riscv_vector_reg_id(cpu, i);

            ret = kvm_set_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
            if (ret) {
                return ret;
            }
        }
    }

    return ret;
}

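/*
 * A "scratch" vCPU is a throwaway KVM VM/vCPU pair used to probe host
 * capabilities at CPU object init time, before the real vCPUs exist.
 */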
typedef struct KVMScratchCPU {
    int kvmfd;
    int vmfd;
    int cpufd;
} KVMScratchCPU;

/*
 * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
 * from target/arm/kvm.c.
 */
static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch)
{
    int kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    do {
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    scratch->kvmfd = kvmfd;
    scratch->vmfd = vmfd;
    scratch->cpufd = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}

static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
{
    close(scratch->cpufd);
    close(scratch->vmfd);
    close(scratch->kvmfd);
}

static void kvm_riscv_init_max_satp_mode(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
    struct kvm_one_reg reg;
    int ret;

    reg.id = RISCV_CONFIG_REG(satp_mode);
    reg.addr = (uint64_t)&cpu->cfg.max_satp_mode;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve satp mode from host, error %d", ret);
    }
}

static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
    struct kvm_one_reg reg;
    int ret;

    reg.id = RISCV_CONFIG_REG(mvendorid);
    reg.addr = (uint64_t)&cpu->cfg.mvendorid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve mvendorid from host, error %d", ret);
    }

    reg.id = RISCV_CONFIG_REG(marchid);
    reg.addr = (uint64_t)&cpu->cfg.marchid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve marchid from host, error %d", ret);
    }

    reg.id = RISCV_CONFIG_REG(mimpid);
    reg.addr = (uint64_t)&cpu->cfg.mimpid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve mimpid from host, error %d", ret);
    }
}

static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
                                         KVMScratchCPU *kvmcpu)
{
    CPURISCVState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    reg.id = RISCV_CONFIG_REG(isa);
    reg.addr = (uint64_t)&env->misa_ext_mask;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);

    if (ret) {
        error_report("Unable to fetch ISA register from KVM, "
                     "error %d", ret);
        kvm_riscv_destroy_scratch_vcpu(kvmcpu);
        exit(EXIT_FAILURE);
    }

    env->misa_ext = env->misa_ext_mask;
}

static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                         KVMCPUConfig *cbomz_cfg)
{
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
                                    cbomz_cfg->kvm_reg_id);
    reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to read KVM reg %s, error %d",
                     cbomz_cfg->name, ret);
        exit(EXIT_FAILURE);
    }
}

static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
                                           KVMScratchCPU *kvmcpu)
{
    uint64_t val;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
        struct kvm_one_reg reg;

        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
                                        multi_ext_cfg->kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            if (errno == EINVAL) {
                /* Silently default to 'false' if KVM does not support it. */
                multi_ext_cfg->supported = false;
                val = false;
            } else {
                error_report("Unable to read ISA_EXT KVM register %s: %s",
                             multi_ext_cfg->name, strerror(errno));
                exit(EXIT_FAILURE);
            }
        } else {
            multi_ext_cfg->supported = true;
        }

        kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
    }

    if (cpu->cfg.ext_zicbom) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
    }

    if (cpu->cfg.ext_zicboz) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
    }
}

static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu)
{
    uint64_t val;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
        KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
        struct kvm_one_reg reg;

        reg.id = csr_cfg->kvm_reg_id;
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            if (errno == EINVAL) {
                csr_cfg->supported = false;
            } else {
                error_report("Unable to read KVM CSR %s: %s",
                             csr_cfg->name, strerror(errno));
                exit(EXIT_FAILURE);
            }
        } else {
            csr_cfg->supported = true;
        }
    }
}

static int uint64_cmp(const void *a, const void *b)
{
    uint64_t val1 = *(const uint64_t *)a;
    uint64_t val2 = *(const uint64_t *)b;

    if (val1 < val2) {
        return -1;
    }

    if (val1 > val2) {
        return 1;
    }

    return 0;
}

static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
                                             struct kvm_reg_list *reglist)
{
    struct kvm_reg_list *reg_search;

    reg_search = bsearch(&kvm_sbi_dbcn.kvm_reg_id, reglist->reg, reglist->n,
                         sizeof(uint64_t), uint64_cmp);

    if (reg_search) {
        kvm_sbi_dbcn.supported = true;
    }
}

static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                 struct kvm_reg_list *reglist)
{
    struct kvm_one_reg reg;
    struct kvm_reg_list *reg_search;
    uint64_t val;
    int ret;

    reg_search = bsearch(&kvm_v_vlenb.kvm_reg_id, reglist->reg, reglist->n,
                         sizeof(uint64_t), uint64_cmp);

    if (reg_search) {
        reg.id = kvm_v_vlenb.kvm_reg_id;
        reg.addr = (uint64_t)&val;

        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_report("Unable to read vlenb register, error code: %d",
                         errno);
            exit(EXIT_FAILURE);
        }

        kvm_v_vlenb.supported = true;
        cpu->cfg.vlenb = val;
    }
}

static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist)
{
    struct kvm_reg_list *reg_search;
    uint64_t reg_id;

    for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
        KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];

        reg_id = csr_cfg->kvm_reg_id;
        reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
                             sizeof(uint64_t), uint64_cmp);
        if (!reg_search) {
            continue;
        }

        csr_cfg->supported = true;
    }
}

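/*
 * Probe host extension/CSR support via KVM_GET_REG_LIST. The first
 * ioctl is issued with n = 0: it is expected to fail with E2BIG while
 * filling in the required number of entries, which sizes the second,
 * real query. Hosts without KVM_GET_REG_LIST (EINVAL) fall back to
 * probing each register individually via the legacy helpers.
 */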
static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
    g_autofree struct kvm_reg_list *reglist = NULL;
    KVMCPUConfig *multi_ext_cfg;
    struct kvm_one_reg reg;
    struct kvm_reg_list rl_struct;
    uint64_t val, reg_id, *reg_search;
    int i, ret;

    rl_struct.n = 0;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, &rl_struct);

    /*
     * If KVM_GET_REG_LIST isn't supported we'll get errno 22
     * (EINVAL). Use read_legacy() in this case.
     */
    if (errno == EINVAL) {
        kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
        kvm_riscv_read_csr_cfg_legacy(kvmcpu);
        return;
    } else if (errno != E2BIG) {
        /*
         * E2BIG is an expected error message for the API since we
         * don't know the number of registers. The right amount will
         * be written in rl_struct.n.
         *
         * Error out if we get any other errno.
         */
        error_report("Error when accessing get-reg-list: %s",
                     strerror(errno));
        exit(EXIT_FAILURE);
    }

    reglist = g_malloc(sizeof(struct kvm_reg_list) +
                       rl_struct.n * sizeof(uint64_t));
    reglist->n = rl_struct.n;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, reglist);
    if (ret) {
        error_report("Error when reading KVM_GET_REG_LIST: %s",
                     strerror(errno));
        exit(EXIT_FAILURE);
    }

    /* sort reglist to use bsearch() */
    qsort(&reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp);

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        multi_ext_cfg = &kvm_multi_ext_cfgs[i];
        reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
                                        multi_ext_cfg->kvm_reg_id);
        reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
                             sizeof(uint64_t), uint64_cmp);
        if (!reg_search) {
            continue;
        }

        reg.id = reg_id;
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_report("Unable to read ISA_EXT KVM register %s: %s",
                         multi_ext_cfg->name, strerror(errno));
            exit(EXIT_FAILURE);
        }

        multi_ext_cfg->supported = true;
        kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
    }

    if (cpu->cfg.ext_zicbom) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
    }

    if (cpu->cfg.ext_zicboz) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
    }

    if (riscv_has_ext(&cpu->env, RVV)) {
        kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
    }

    kvm_riscv_check_sbi_dbcn_support(cpu, reglist);
    kvm_riscv_read_csr_cfg(reglist);
}

static void riscv_init_kvm_registers(Object *cpu_obj)
{
    RISCVCPU *cpu = RISCV_CPU(cpu_obj);
    KVMScratchCPU kvmcpu;

    if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
        return;
    }

    kvm_riscv_init_machine_ids(cpu, &kvmcpu);
    kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
    kvm_riscv_init_cfg(cpu, &kvmcpu);
    kvm_riscv_init_max_satp_mode(cpu, &kvmcpu);

    kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_vector(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state = {
            .mp_state = state
        };

        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to sync MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            return -1;
        }
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_vector(cs);
    if (ret) {
        return ret;
    }

    if (KVM_PUT_RESET_STATE == level) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        if (cs->cpu_index == 0) {
            ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_RUNNABLE);
        } else {
            ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_STOPPED);
        }
        if (ret) {
            return ret;
        }
    }

    return ret;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
{
    CPUState *cs = opaque;

    if (running) {
        kvm_riscv_put_regs_timer(cs);
    } else {
        kvm_riscv_get_regs_timer(cs);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
    target_ulong reg;
    uint64_t id;
    int ret;

    id = RISCV_CONFIG_REG(mvendorid);
    /*
     * cfg.mvendorid is a uint32 but a target_ulong will
     * be written. Assign it to a target_ulong var to avoid
     * writing pieces of other cpu->cfg fields in the reg.
     */
    reg = cpu->cfg.mvendorid;
    ret = kvm_set_one_reg(cs, id, &reg);
    if (ret != 0) {
        return ret;
    }

    id = RISCV_CONFIG_REG(marchid);
    ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
    if (ret != 0) {
        return ret;
    }

    id = RISCV_CONFIG_REG(mimpid);
    ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);

    return ret;
}

static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
{
    target_ulong reg = 1;

    if (!kvm_sbi_dbcn.supported) {
        return 0;
    }

    return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
}

int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
{
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    RISCVCPU *cpu = RISCV_CPU(cs);

    qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);

    if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
        ret = kvm_vcpu_set_machine_ids(cpu, cs);
        if (ret != 0) {
            return ret;
        }
    }

    kvm_riscv_update_cpu_misa_ext(cpu, cs);
    kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);

    ret = kvm_vcpu_enable_sbi_dbcn(cpu, cs);

    return ret;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    /*
     * We can create the VAIA using the newer device control API.
     */
    return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

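/*
 * Handler for SBI Debug Console (DBCN) extension exits: console reads
 * and writes are serviced using the first serial device, serial_hd(0),
 * as the backend.
 */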
static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
{
    g_autofree uint8_t *buf = NULL;
    RISCVCPU *cpu = RISCV_CPU(cs);
    target_ulong num_bytes;
    uint64_t addr;
    unsigned char ch;
    int ret;

    switch (run->riscv_sbi.function_id) {
    case SBI_EXT_DBCN_CONSOLE_READ:
    case SBI_EXT_DBCN_CONSOLE_WRITE:
        num_bytes = run->riscv_sbi.args[0];

        if (num_bytes == 0) {
            run->riscv_sbi.ret[0] = SBI_SUCCESS;
            run->riscv_sbi.ret[1] = 0;
            break;
        }

        addr = run->riscv_sbi.args[1];

        /*
         * Handle the case where a 32 bit CPU is running in a
         * 64 bit addressing env.
         */
        if (riscv_cpu_mxl(&cpu->env) == MXL_RV32) {
            addr |= (uint64_t)run->riscv_sbi.args[2] << 32;
        }

        buf = g_malloc0(num_bytes);

        if (run->riscv_sbi.function_id == SBI_EXT_DBCN_CONSOLE_READ) {
            ret = qemu_chr_fe_read_all(serial_hd(0)->be, buf, num_bytes);
            if (ret < 0) {
                error_report("SBI_EXT_DBCN_CONSOLE_READ: error when "
                             "reading chardev");
                exit(1);
            }

            cpu_physical_memory_write(addr, buf, ret);
        } else {
            cpu_physical_memory_read(addr, buf, num_bytes);

            ret = qemu_chr_fe_write_all(serial_hd(0)->be, buf, num_bytes);
            if (ret < 0) {
                error_report("SBI_EXT_DBCN_CONSOLE_WRITE: error when "
                             "writing chardev");
                exit(1);
            }
        }

        run->riscv_sbi.ret[0] = SBI_SUCCESS;
        run->riscv_sbi.ret[1] = ret;
        break;
    case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
        ch = run->riscv_sbi.args[0];
        ret = qemu_chr_fe_write_all(serial_hd(0)->be, &ch, sizeof(ch));

        if (ret < 0) {
            error_report("SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: error when "
                         "writing chardev");
            exit(1);
        }

        run->riscv_sbi.ret[0] = SBI_SUCCESS;
        run->riscv_sbi.ret[1] = 0;
        break;
    default:
        run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
    }
}

static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;

    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.ret[0] = ch;
        } else {
            run->riscv_sbi.ret[0] = -1;
        }
        ret = 0;
        break;
    case SBI_EXT_DBCN:
        kvm_riscv_handle_sbi_dbcn(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: un-handled SBI EXIT, extension id %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }
    return ret;
}

static int kvm_riscv_handle_csr(CPUState *cs, struct kvm_run *run)
{
    target_ulong csr_num = run->riscv_csr.csr_num;
    target_ulong new_value = run->riscv_csr.new_value;
    target_ulong write_mask = run->riscv_csr.write_mask;
    int ret = 0;

    switch (csr_num) {
    case CSR_SEED:
        run->riscv_csr.ret_value = riscv_new_csr_seed(new_value, write_mask);
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: un-handled CSR EXIT for CSR %lx\n",
                      __func__, csr_num);
        ret = -1;
        break;
    }

    return ret;
}

static bool kvm_riscv_handle_debug(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    if (kvm_find_sw_breakpoint(cs, env->pc)) {
        return true;
    }

    return false;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    case KVM_EXIT_RISCV_CSR:
        ret = kvm_riscv_handle_csr(cs, run);
        break;
    case KVM_EXIT_DEBUG:
        if (kvm_riscv_handle_debug(cs)) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}

void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = 0;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr;          /* a1 */

    kvm_riscv_reset_regs_csr(env);
}

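/*
 * Inject or clear the S-mode external interrupt line via
 * KVM_INTERRUPT. This is the only wire interrupt delivered to a
 * KVM vCPU from here; any other IRQ number is a bug.
 */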
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}

static int aia_mode;

static const char *kvm_aia_mode_str(uint64_t mode)
{
    switch (mode) {
    case KVM_DEV_RISCV_AIA_MODE_EMUL:
        return "emul";
    case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
        return "hwaccel";
    case KVM_DEV_RISCV_AIA_MODE_AUTO:
    default:
        return "auto";
    };
}

static char *riscv_get_kvm_aia(Object *obj, Error **errp)
{
    return g_strdup(kvm_aia_mode_str(aia_mode));
}

static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
{
    if (!strcmp(val, "emul")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
    } else if (!strcmp(val, "hwaccel")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
    } else if (!strcmp(val, "auto")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
    } else {
        error_setg(errp, "Invalid KVM AIA mode");
        error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
    }
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
                                  riscv_set_kvm_aia);
    object_class_property_set_description(oc, "riscv-aia",
        "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. "
        "Changing KVM AIA modes relies on host support. Defaults to 'auto' "
        "if the host supports it");
    object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
                                    "auto");
}

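/*
 * Create and configure the in-kernel AIA device: negotiate the AIA
 * mode, set source/MSI counts and the APLIC/IMSIC addresses, then
 * initialize it via KVM_DEV_RISCV_AIA_CTRL_INIT. With a split
 * irqchip the APLIC stays in QEMU and only the IMSICs are set up
 * in KVM.
 */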
kvm_riscv_aia_create(MachineState * machine,uint64_t group_shift,uint64_t aia_irq_num,uint64_t aia_msi_num,uint64_t aplic_base,uint64_t imsic_base,uint64_t guest_num)1809 void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
1810 uint64_t aia_irq_num, uint64_t aia_msi_num,
1811 uint64_t aplic_base, uint64_t imsic_base,
1812 uint64_t guest_num)
1813 {
1814 int ret, i;
1815 int aia_fd = -1;
1816 uint64_t default_aia_mode;
1817 uint64_t socket_count = riscv_socket_count(machine);
1818 uint64_t max_hart_per_socket = 0;
1819 uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
1820 uint64_t socket_bits, hart_bits, guest_bits;
1821 uint64_t max_group_id;
1822
1823 aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
1824
1825 if (aia_fd < 0) {
1826 error_report("Unable to create in-kernel irqchip");
1827 exit(1);
1828 }
1829
1830 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1831 KVM_DEV_RISCV_AIA_CONFIG_MODE,
1832 &default_aia_mode, false, NULL);
1833 if (ret < 0) {
1834 error_report("KVM AIA: failed to get current KVM AIA mode");
1835 exit(1);
1836 }
1837
1838 if (default_aia_mode != aia_mode) {
1839 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1840 KVM_DEV_RISCV_AIA_CONFIG_MODE,
1841 &aia_mode, true, NULL);
1842 if (ret < 0) {
1843 warn_report("KVM AIA: failed to set KVM AIA mode '%s', using "
1844 "default host mode '%s'",
1845 kvm_aia_mode_str(aia_mode),
1846 kvm_aia_mode_str(default_aia_mode));
1847
1848 /* failed to change AIA mode, use default */
1849 aia_mode = default_aia_mode;
1850 }
1851 }
1852
1853 /*
1854 * Skip APLIC creation in KVM if we're running split mode.
1855 * This is done by leaving KVM_DEV_RISCV_AIA_CONFIG_SRCS
1856 * unset. We can also skip KVM_DEV_RISCV_AIA_ADDR_APLIC
1857 * since KVM won't be using it.
1858 */
1859 if (!kvm_kernel_irqchip_split()) {
1860 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1861 KVM_DEV_RISCV_AIA_CONFIG_SRCS,
1862 &aia_irq_num, true, NULL);
1863 if (ret < 0) {
1864 error_report("KVM AIA: failed to set number of input irq lines");
1865 exit(1);
1866 }
1867
1868 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
1869 KVM_DEV_RISCV_AIA_ADDR_APLIC,
1870 &aplic_base, true, NULL);
1871 if (ret < 0) {
1872 error_report("KVM AIA: failed to set the base address of APLIC");
1873 exit(1);
1874 }
1875 }
1876
1877 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1878 KVM_DEV_RISCV_AIA_CONFIG_IDS,
1879 &aia_msi_num, true, NULL);
1880 if (ret < 0) {
1881 error_report("KVM AIA: failed to set number of msi");
1882 exit(1);
1883 }
1884
1885
1886 if (socket_count > 1) {
1887 max_group_id = socket_count - 1;
1888 socket_bits = find_last_bit(&max_group_id, BITS_PER_LONG) + 1;
1889 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1890 KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
1891 &socket_bits, true, NULL);
1892 if (ret < 0) {
1893 error_report("KVM AIA: failed to set group_bits");
1894 exit(1);
1895 }
1896
1897 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1898 KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
1899 &group_shift, true, NULL);
1900 if (ret < 0) {
1901 error_report("KVM AIA: failed to set group_shift");
1902 exit(1);
1903 }
1904 }
1905
1906 guest_bits = guest_num == 0 ? 0 :
1907 find_last_bit(&guest_num, BITS_PER_LONG) + 1;
1908 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1909 KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS,
1910 &guest_bits, true, NULL);
1911 if (ret < 0) {
1912 error_report("KVM AIA: failed to set guest_bits");
1913 exit(1);
1914 }
1915
    for (socket = 0; socket < socket_count; socket++) {
        socket_imsic_base = imsic_base + socket * (1U << group_shift);
        hart_count = riscv_socket_hart_count(machine, socket);
        base_hart = riscv_socket_first_hartid(machine, socket);

        if (max_hart_per_socket < hart_count) {
            max_hart_per_socket = hart_count;
        }

        for (i = 0; i < hart_count; i++) {
            imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits);
            ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
                                    KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart),
                                    &imsic_addr, true, NULL);
            if (ret < 0) {
                error_report("KVM AIA: failed to set the IMSIC address for hart %d", i);
                exit(1);
            }
        }
    }

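    /*
     * hart_bits is the number of address bits needed to encode the
     * largest hart index within a socket (the maximum index, not the
     * hart count).
     */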
    if (max_hart_per_socket > 1) {
        max_hart_per_socket--;
        hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
    } else {
        hart_bits = 0;
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                            KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
                            &hart_bits, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to set hart_bits");
        exit(1);
    }

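    /*
     * Pre-populate an identity GSI-to-APLIC-pin routing table so that
     * kvm_set_irq() and irqfds can target wired interrupts directly.
     */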
    if (kvm_has_gsi_routing()) {
        for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) {
            /* KVM AIA only has one APLIC instance */
            kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx);
        }
        kvm_gsi_routing_allowed = true;
        kvm_irqchip_commit_routes(kvm_state);
    }

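    /*
     * Finalize the device: after KVM_DEV_RISCV_AIA_CTRL_INIT the
     * configuration above takes effect and can no longer be changed.
     */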
    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
                            KVM_DEV_RISCV_AIA_CTRL_INIT,
                            NULL, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: initialization failed");
        exit(1);
    }

    kvm_msi_via_irqfd_allowed = true;
}
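
/*
 * Illustrative call site (hypothetical values, not taken from any
 * particular board model): a machine with IMSIC groups 64 KiB apart,
 * 96 wired APLIC sources, 255 MSI ids and no guest files might do:
 *
 *     kvm_riscv_aia_create(machine, 16, 96, 255,
 *                          0x0c000000, 0x24000000, 0);
 */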

static void kvm_cpu_instance_init(CPUState *cs)
{
    Object *obj = OBJECT(RISCV_CPU(cs));

    riscv_init_kvm_registers(obj);

    kvm_riscv_add_cpu_user_properties(obj);
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> kvm_cpu_realize() (via accel_cpu_common_realize())
 */
static bool kvm_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    int ret;

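    /*
     * Vector state for user threads may be disabled by the kernel's
     * default policy; explicitly enable it for this vCPU thread so
     * the guest can use RVV.
     */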
    if (riscv_has_ext(&cpu->env, RVV)) {
        ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
        if (ret) {
            error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s",
                       strerrorname_np(errno));
            return false;
        }
    }

    return true;
}

void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    KVMScratchCPU kvmcpu;
    struct kvm_one_reg reg;
    uint64_t val;
    int ret;

    /* short-circuit without spinning the scratch CPU */
    if (!cpu->cfg.ext_zicbom && !cpu->cfg.ext_zicboz &&
        !riscv_has_ext(env, RVV)) {
        return;
    }

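    /*
     * cbom/cboz block sizes and vlenb are fixed by the host; spin up a
     * throwaway vCPU to read the host values and reject any user
     * setting that differs from them.
     */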
    if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
        error_setg(errp, "Unable to create scratch KVM cpu");
        return;
    }

    if (cpu->cfg.ext_zicbom &&
        riscv_cpu_option_set(kvm_cbom_blocksize.name)) {

        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
                                        kvm_cbom_blocksize.kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read cbom_blocksize, error %d", errno);
            return;
        }

        if (cpu->cfg.cbom_blocksize != val) {
            error_setg(errp, "Unable to set cbom_blocksize to a different "
                       "value than the host (%" PRIu64 ")", val);
            return;
        }
    }

    if (cpu->cfg.ext_zicboz &&
        riscv_cpu_option_set(kvm_cboz_blocksize.name)) {

        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
                                        kvm_cboz_blocksize.kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read cboz_blocksize, error %d", errno);
            return;
        }

        if (cpu->cfg.cboz_blocksize != val) {
            error_setg(errp, "Unable to set cboz_blocksize to a different "
                       "value than the host (%" PRIu64 ")", val);
            return;
        }
    }

    /* Users set 'vlen' (in bits); KVM exposes 'vlenb' (vlen / 8, in bytes) */
    if (riscv_has_ext(env, RVV) && riscv_cpu_option_set("vlen")) {
        if (!kvm_v_vlenb.supported) {
            error_setg(errp, "Unable to set 'vlenb': register not supported");
            return;
        }

        reg.id = kvm_v_vlenb.kvm_reg_id;
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read vlenb register, error %d", errno);
            return;
        }

        if (cpu->cfg.vlenb != val) {
            error_setg(errp, "Unable to set 'vlen' to a different "
                       "value than the host (%" PRIu64 ")", val * 8);
            return;
        }
    }

    kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}

static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_instance_init = kvm_cpu_instance_init;
    acc->cpu_target_realize = kvm_cpu_realize;
}

static const TypeInfo kvm_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("kvm"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = kvm_cpu_accel_class_init,
    .abstract = true,
};

static void kvm_cpu_accel_register_types(void)
{
    type_register_static(&kvm_cpu_accel_type_info);
}
type_init(kvm_cpu_accel_register_types);

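/*
 * The 'host' CPU model: its properties are populated from the host via
 * KVM at instance init, so no static priv/vext spec or satp mode is
 * declared here.
 */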
static const TypeInfo riscv_kvm_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU_HOST,
        .parent = TYPE_RISCV_CPU,
#if defined(TARGET_RISCV32)
        .class_data = &(const RISCVCPUDef) {
            .misa_mxl_max = MXL_RV32,
            .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
            .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
            .cfg.max_satp_mode = -1,
        },
#elif defined(TARGET_RISCV64)
        .class_data = &(const RISCVCPUDef) {
            .misa_mxl_max = MXL_RV64,
            .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
            .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
            .cfg.max_satp_mode = -1,
        },
#endif
    }
};

DEFINE_TYPES(riscv_kvm_cpu_type_infos)

static const uint32_t ebreak_insn = 0x00100073;
static const uint16_t c_ebreak_insn = 0x9002;
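
/*
 * RISC-V encodes instruction length in the lowest two opcode bits:
 * 0b11 means a 32-bit instruction, anything else a 16-bit compressed
 * one. For example, ebreak (0x00100073) has 0x73 & 0x3 == 0x3, so it
 * is 32-bit, while c.ebreak (0x9002) has 0x02 & 0x3 == 0x2 (16-bit).
 * The breakpoint helpers below use the same test on the guest's
 * instruction to pick the matching ebreak width.
 */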
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 0)) {
        return -EINVAL;
    }

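    /*
     * The first halfword is enough to tell the instruction's length;
     * re-read all four bytes before saving a 32-bit instruction, then
     * overwrite it with the matching-width ebreak.
     */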
    if ((bp->saved_insn & 0x3) == 0x3) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0)
            || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak_insn, 4, 1)) {
            return -EINVAL;
        }
    } else {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak_insn, 2, 1)) {
            return -EINVAL;
        }
    }

    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint32_t ebreak;
    uint16_t c_ebreak;

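    /*
     * Read back what is currently at bp->pc and make sure it is still
     * our ebreak before restoring the saved instruction; refuse to
     * restore over something the guest has since modified.
     */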
    if ((bp->saved_insn & 0x3) == 0x3) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak, 4, 0) ||
            ebreak != ebreak_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
    } else {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak, 2, 0) ||
            c_ebreak != c_ebreak_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 1)) {
            return -EINVAL;
        }
    }

    return 0;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    /* TODO: to be implemented later. */
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    /* TODO: to be implemented later. */
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    /* TODO: to be implemented later. */
}

void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE;
    }
}