/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/prctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
#include "system/system.h"
#include "system/kvm.h"
#include "system/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "accel/accel-cpu-target.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "hw/intc/riscv_imsic.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/misc.h"
#include "system/runstate.h"
#include "hw/riscv/numa.h"

#define PR_RISCV_V_SET_CONTROL    69
#define PR_RISCV_V_VSTATE_CTRL_ON  2

void riscv_kvm_aplic_request(void *opaque, int irq, int level)
{
    kvm_set_irq(kvm_state, irq, !!level);
}

static bool cap_has_mp_state;

#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \
                                         type | idx)

#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \
                                         type | idx)

#if defined(TARGET_RISCV64)
#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx)
#else
#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx)
#endif

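/*
 * KVM encodes a register's size in its ID as log2 of the size in bytes,
 * shifted into position at KVM_REG_SIZE_SHIFT. The helper below builds
 * such an ID for registers whose size is only known at runtime, e.g.
 * vector registers, whose size depends on vlenb.
 */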
static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
{
    uint64_t size_ctz = __builtin_ctz(size_b);

    return id | (size_ctz << KVM_REG_SIZE_SHIFT);
}

static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
                                        uint64_t idx)
{
    uint64_t id;
    size_t size_b;

    g_assert(idx < 32);

    id = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(idx);
    size_b = cpu->cfg.vlenb;

    return kvm_encode_reg_size_id(id, size_b);
}

#define RISCV_CORE_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \
                           KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \
                           KVM_REG_RISCV_CSR_REG(name))

#define RISCV_CONFIG_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \
                           KVM_REG_RISCV_CONFIG_REG(name))

#define RISCV_TIMER_REG(name)  KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \
                                                    KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(idx)  KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(idx)  KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx)

#define RISCV_VECTOR_CSR_REG(name) \
    KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \
                           KVM_REG_RISCV_VECTOR_CSR_REG(name))

#define KVM_RISCV_GET_TIMER(cs, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

#define KVM_RISCV_SET_TIMER(cs, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

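/*
 * Describes a single KVM-visible CPU option: its user-facing name and
 * description, the offset of its backing field in CPURISCVState or
 * RISCVCPUConfig (or the MISA bit, for MISA extensions), the KVM
 * register ID used to access it, plus whether the user explicitly set
 * it and whether the host KVM supports it.
 */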
typedef struct KVMCPUConfig {
    const char *name;
    const char *description;
    target_ulong offset;
    uint64_t kvm_reg_id;
    uint32_t prop_size;
    bool user_set;
    bool supported;
} KVMCPUConfig;

#define KVM_MISA_CFG(_bit, _reg_id) \
    {.offset = _bit, .kvm_reg_id = _reg_id}

/* KVM ISA extensions */
static KVMCPUConfig kvm_misa_ext_cfgs[] = {
    KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
    KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
    KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
    KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
    KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
    KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
    KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
    KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V),
};

static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value = env->misa_ext_mask & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value, host_bit;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_bit = env->misa_ext_mask & misa_bit;

    if (value == host_bit) {
        return;
    }

    if (!value) {
        misa_ext_cfg->user_set = true;
        return;
    }

    /*
     * Forbid users to enable extensions that aren't
     * available in the hart.
     */
    error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
               "enabled in the host", misa_ext_cfg->name);
}

static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
{
    CPURISCVState *env = &cpu->env;
    uint64_t id, reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
        KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
        target_ulong misa_bit = misa_cfg->offset;

        if (!misa_cfg->user_set) {
            continue;
        }

        /* If we're here we're going to disable the MISA bit */
        reg = 0;
        id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
                                    misa_cfg->kvm_reg_id);
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret != 0) {
            /*
             * We're not checking for -EINVAL because if the bit is about
             * to be disabled, it means that it was already enabled by
             * KVM. We determined that by fetching the 'isa' register
             * during init() time. Any error at this point is worth
             * aborting.
             */
            error_report("Unable to set KVM reg %s, error %d",
                         misa_cfg->name, ret);
            exit(EXIT_FAILURE);
        }
        env->misa_ext &= ~misa_bit;
    }
}

#define KVM_CSR_CFG(_name, _env_prop, reg_id) \
    {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \
     .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \
     .kvm_reg_id = reg_id}

static KVMCPUConfig kvm_csr_cfgs[] = {
    KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)),
    KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)),
    KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)),
    KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)),
    KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)),
    KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)),
    KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)),
    KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)),
    KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)),
    KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)),
    KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)),
};

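/*
 * The helpers below read/write a CSR's backing field in CPURISCVState
 * through the byte offset recorded in the config entry, since the
 * backing fields differ in type and cannot be addressed uniformly.
 */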
static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
    return (void *)&cpu->env + csr_cfg->offset;
}

static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
    uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
    return *val32;
}

static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
{
    uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
    return *val64;
}

static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
                                uint32_t val)
{
    uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
    *val32 = val;
}

static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
                                uint64_t val)
{
    uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
    *val64 = val;
}

#define KVM_EXT_CFG(_name, _prop, _reg_id) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .kvm_reg_id = _reg_id}

static KVMCPUConfig kvm_multi_ext_cfgs[] = {
    KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
    KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
    KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
    KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
    KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
    KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
    KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
    KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
    KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
    KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
    KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
    KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
    KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
    KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
    KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
    KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
    KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
    KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
    KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
    KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
    KVM_EXT_CFG("zbc", ext_zbc, KVM_RISCV_ISA_EXT_ZBC),
    KVM_EXT_CFG("zbkb", ext_zbkb, KVM_RISCV_ISA_EXT_ZBKB),
    KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
    KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
    KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
    KVM_EXT_CFG("zca", ext_zca, KVM_RISCV_ISA_EXT_ZCA),
    KVM_EXT_CFG("zcb", ext_zcb, KVM_RISCV_ISA_EXT_ZCB),
    KVM_EXT_CFG("zcd", ext_zcd, KVM_RISCV_ISA_EXT_ZCD),
    KVM_EXT_CFG("zcf", ext_zcf, KVM_RISCV_ISA_EXT_ZCF),
    KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
    KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
    KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
    KVM_EXT_CFG("zkr", ext_zkr, KVM_RISCV_ISA_EXT_ZKR),
    KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED),
    KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH),
    KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT),
    KVM_EXT_CFG("ztso", ext_ztso, KVM_RISCV_ISA_EXT_ZTSO),
    KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB),
    KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC),
    KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH),
    KVM_EXT_CFG("zvfhmin", ext_zvfhmin, KVM_RISCV_ISA_EXT_ZVFHMIN),
    KVM_EXT_CFG("zvkb", ext_zvkb, KVM_RISCV_ISA_EXT_ZVKB),
    KVM_EXT_CFG("zvkg", ext_zvkg, KVM_RISCV_ISA_EXT_ZVKG),
    KVM_EXT_CFG("zvkned", ext_zvkned, KVM_RISCV_ISA_EXT_ZVKNED),
    KVM_EXT_CFG("zvknha", ext_zvknha, KVM_RISCV_ISA_EXT_ZVKNHA),
    KVM_EXT_CFG("zvknhb", ext_zvknhb, KVM_RISCV_ISA_EXT_ZVKNHB),
    KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
    KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
    KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
    KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
    KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
    KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
    KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
    KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
    KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
    KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
    KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
    KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
    KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
    KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
    KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
};

static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
{
    return (void *)&cpu->cfg + kvmcfg->offset;
}

static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext,
                            uint32_t val)
{
    bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);

    *ext_enabled = val;
}

static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
                                KVMCPUConfig *multi_ext)
{
    bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);

    return *ext_enabled;
}

static void kvm_cpu_get_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
{
    KVMCPUConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value = kvm_cpu_cfg_get(cpu, multi_ext_cfg);

    visit_type_bool(v, name, &value, errp);
}

static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
{
    KVMCPUConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value, host_val;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg);

    /*
     * Ignore if the user is setting the same value
     * as the host.
     */
    if (value == host_val) {
        return;
    }

    if (!multi_ext_cfg->supported) {
        /*
         * Error out if the user is trying to enable an
         * extension that KVM doesn't support. Ignore
         * option otherwise.
         */
        if (value) {
            error_setg(errp, "KVM does not support enabling extension %s",
                       multi_ext_cfg->name);
        }

        return;
    }

    multi_ext_cfg->user_set = true;
    kvm_cpu_cfg_set(cpu, multi_ext_cfg, value);
}

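/*
 * Cache-block sizes for Zicbom/Zicboz are host properties read via KVM
 * config registers. A user-provided value is validated against the host
 * value later, in riscv_kvm_cpu_finalize_features().
 */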
static KVMCPUConfig kvm_cbom_blocksize = {
    .name = "cbom_blocksize",
    .offset = CPU_CFG_OFFSET(cbom_blocksize),
    .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
};

static KVMCPUConfig kvm_cboz_blocksize = {
    .name = "cboz_blocksize",
    .offset = CPU_CFG_OFFSET(cboz_blocksize),
    .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
};

static KVMCPUConfig kvm_v_vlenb = {
    .name = "vlenb",
    .offset = CPU_CFG_OFFSET(vlenb),
    .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR |
                  KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)
};

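/*
 * SBI extension toggle register for the Debug Console (DBCN) extension.
 * Probed via KVM_GET_REG_LIST and, if present, written with 1 so that
 * KVM forwards DBCN calls to userspace (see kvm_vcpu_enable_sbi_dbcn()).
 */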
static KVMCPUConfig kvm_sbi_dbcn = {
    .name = "sbi_dbcn",
    .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                  KVM_REG_RISCV_SBI_EXT | KVM_RISCV_SBI_EXT_DBCN
};

static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
{
    uint64_t id, reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];

        if (!multi_ext_cfg->user_set) {
            continue;
        }

        id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
                                    multi_ext_cfg->kvm_reg_id);
        reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret != 0) {
            if (!reg && ret == -EINVAL) {
                warn_report("KVM cannot disable extension %s",
                            multi_ext_cfg->name);
            } else {
                error_report("Unable to enable extension %s in KVM, error %d",
                             multi_ext_cfg->name, ret);
                exit(EXIT_FAILURE);
            }
        }
    }
}

static void cpu_get_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    bool value = false;

    visit_type_bool(v, name, &value, errp);
}

static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "'%s' is not available with KVM",
                   propname);
    }
}

static void riscv_cpu_add_kvm_unavail_prop(Object *obj, const char *prop_name)
{
    /* Check if KVM created the property already */
    if (object_property_find(obj, prop_name)) {
        return;
    }

    /*
     * Set the default to disabled for every extension
     * unknown to KVM and error out if the user attempts
     * to enable any of them.
     */
    object_property_add(obj, prop_name, "bool",
                        cpu_get_cfg_unavailable,
                        cpu_set_cfg_unavailable,
                        NULL, (void *)prop_name);
}

static void riscv_cpu_add_kvm_unavail_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        riscv_cpu_add_kvm_unavail_prop(obj, prop->name);
    }
}

static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
{
    int i;

    riscv_add_satp_mode_properties(cpu_obj);

    for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
        KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
        int bit = misa_cfg->offset;

        misa_cfg->name = riscv_get_misa_ext_name(bit);
        misa_cfg->description = riscv_get_misa_ext_description(bit);

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            kvm_cpu_get_misa_ext_cfg,
                            kvm_cpu_set_misa_ext_cfg,
                            NULL, misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        const char *ext_name = riscv_get_misa_ext_name(misa_bits[i]);
        riscv_cpu_add_kvm_unavail_prop(cpu_obj, ext_name);
    }

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];

        object_property_add(cpu_obj, multi_cfg->name, "bool",
                            kvm_cpu_get_multi_ext_cfg,
                            kvm_cpu_set_multi_ext_cfg,
                            NULL, multi_cfg);
    }

    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions);
    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts);

    /* We don't have the needed KVM support for profiles */
    for (i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name);
    }
}

static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}

static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint64_t reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
        KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];

        if (!csr_cfg->supported) {
            continue;
        }

        ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
        if (ret) {
            return ret;
        }

        if (csr_cfg->prop_size == sizeof(uint32_t)) {
            kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg);
        } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
            kvm_cpu_csr_set_u64(cpu, csr_cfg, reg);
        } else {
            g_assert_not_reached();
        }
    }

    return 0;
}

static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint64_t reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
        KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];

        if (!csr_cfg->supported) {
            continue;
        }

        if (csr_cfg->prop_size == sizeof(uint32_t)) {
            reg = kvm_cpu_csr_get_u32(cpu, csr_cfg);
        } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
            reg = kvm_cpu_csr_get_u64(cpu, csr_cfg);
        } else {
            g_assert_not_reached();
        }

        ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

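/*
 * Resets the guest-visible CSR state mirrored in CPURISCVState. The
 * fields zeroed here mirror kvm_csr_cfgs[]: a CSR synced in that table
 * should also be reset here.
 */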
static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
{
    env->mstatus = 0;
    env->mie = 0;
    env->stvec = 0;
    env->sscratch = 0;
    env->sepc = 0;
    env->scause = 0;
    env->stval = 0;
    env->mip = 0;
    env->satp = 0;
    env->scounteren = 0;
    env->senvcfg = 0;
}

static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}

static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}

static void kvm_riscv_get_regs_timer(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time);
    KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare);
    KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state);
    KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency);

    env->kvm_timer_dirty = true;
}

static void kvm_riscv_put_regs_timer(CPUState *cs)
{
    uint64_t reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (!env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time);
    KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare);

    /*
     * Setting the RISCV_TIMER_REG(state) register makes KVM return an
     * error when env->kvm_timer_state == 0. Ideally this would be fixed
     * in KVM, but it's harmless to work around it here for now.
     * TODO: adapt this if KVM changes.
     */
    if (env->kvm_timer_state) {
        KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state);
    }

    /*
     * For now, migration does not work between hosts with different
     * timer frequencies, so verify during migration that the source
     * and destination frequencies match.
     */
    if (migration_is_running()) {
        KVM_RISCV_GET_TIMER(cs, frequency, reg);
        if (reg != env->kvm_timer_frequency) {
            error_report("Dst Hosts timer frequency != Src Hosts");
        }
    }

    env->kvm_timer_dirty = false;
}

uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
{
    uint64_t reg;

    KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);

    return reg;
}

static int kvm_riscv_get_regs_vector(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    target_ulong reg;
    uint64_t vreg_id;
    int vreg_idx, ret = 0;

    if (!riscv_has_ext(env, RVV)) {
        return 0;
    }

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
    if (ret) {
        return ret;
    }
    env->vstart = reg;

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
    if (ret) {
        return ret;
    }
    env->vl = reg;

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
    if (ret) {
        return ret;
    }
    env->vtype = reg;

    if (kvm_v_vlenb.supported) {
        ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
        if (ret) {
            return ret;
        }
        cpu->cfg.vlenb = reg;

        for (int i = 0; i < 32; i++) {
            /*
             * vreg[] is statically allocated using RV_VLEN_MAX.
             * Use it instead of vlenb to calculate vreg_idx for
             * simplicity.
             */
            vreg_idx = i * RV_VLEN_MAX / 64;
            vreg_id = kvm_riscv_vector_reg_id(cpu, i);

            ret = kvm_get_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
            if (ret) {
                return ret;
            }
        }
    }

    return 0;
}

static int kvm_riscv_put_regs_vector(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    target_ulong reg;
    uint64_t vreg_id;
    int vreg_idx, ret = 0;

    if (!riscv_has_ext(env, RVV)) {
        return 0;
    }

    reg = env->vstart;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
    if (ret) {
        return ret;
    }

    reg = env->vl;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
    if (ret) {
        return ret;
    }

    reg = env->vtype;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
    if (ret) {
        return ret;
    }

    if (kvm_v_vlenb.supported) {
        reg = cpu->cfg.vlenb;
        ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
        if (ret) {
            return ret;
        }

        for (int i = 0; i < 32; i++) {
            /*
             * vreg[] is statically allocated using RV_VLEN_MAX.
             * Use it instead of vlenb to calculate vreg_idx for
             * simplicity.
             */
            vreg_idx = i * RV_VLEN_MAX / 64;
            vreg_id = kvm_riscv_vector_reg_id(cpu, i);

            ret = kvm_set_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
            if (ret) {
                return ret;
            }
        }
    }

    return ret;
}

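/*
 * File descriptors for a short-lived "scratch" VM/vCPU used to probe
 * host KVM capabilities before the real vCPUs are created.
 */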
typedef struct KVMScratchCPU {
    int kvmfd;
    int vmfd;
    int cpufd;
} KVMScratchCPU;

/*
 * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
 * from target/arm/kvm.c.
 */
static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch)
{
    int kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    do {
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    scratch->kvmfd = kvmfd;
    scratch->vmfd = vmfd;
    scratch->cpufd = cpufd;

    return true;

 err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}

static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
{
    close(scratch->cpufd);
    close(scratch->vmfd);
    close(scratch->kvmfd);
}

static void kvm_riscv_init_max_satp_mode(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
    struct kvm_one_reg reg;
    int ret;

    reg.id = RISCV_CONFIG_REG(satp_mode);
    reg.addr = (uint64_t)&cpu->cfg.max_satp_mode;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve satp mode from host, error %d", ret);
    }
}

static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
    struct kvm_one_reg reg;
    int ret;

    reg.id = RISCV_CONFIG_REG(mvendorid);
    reg.addr = (uint64_t)&cpu->cfg.mvendorid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve mvendorid from host, error %d", ret);
    }

    reg.id = RISCV_CONFIG_REG(marchid);
    reg.addr = (uint64_t)&cpu->cfg.marchid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve marchid from host, error %d", ret);
    }

    reg.id = RISCV_CONFIG_REG(mimpid);
    reg.addr = (uint64_t)&cpu->cfg.mimpid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve mimpid from host, error %d", ret);
    }
}

static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
                                         KVMScratchCPU *kvmcpu)
{
    CPURISCVState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    reg.id = RISCV_CONFIG_REG(isa);
    reg.addr = (uint64_t)&env->misa_ext_mask;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);

    if (ret) {
        error_report("Unable to fetch ISA register from KVM, "
                     "error %d", ret);
        kvm_riscv_destroy_scratch_vcpu(kvmcpu);
        exit(EXIT_FAILURE);
    }

    env->misa_ext = env->misa_ext_mask;
}

static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                         KVMCPUConfig *cbomz_cfg)
{
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
                                    cbomz_cfg->kvm_reg_id);
    reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to read KVM reg %s, error %d",
                     cbomz_cfg->name, ret);
        exit(EXIT_FAILURE);
    }
}

static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
                                           KVMScratchCPU *kvmcpu)
{
    uint64_t val;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
        struct kvm_one_reg reg;

        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
                                        multi_ext_cfg->kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            if (errno == EINVAL) {
                /* Silently default to 'false' if KVM does not support it. */
                multi_ext_cfg->supported = false;
                val = false;
            } else {
                error_report("Unable to read ISA_EXT KVM register %s: %s",
                             multi_ext_cfg->name, strerror(errno));
                exit(EXIT_FAILURE);
            }
        } else {
            multi_ext_cfg->supported = true;
        }

        kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
    }

    if (cpu->cfg.ext_zicbom) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
    }

    if (cpu->cfg.ext_zicboz) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
    }
}

static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu)
{
    uint64_t val;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
        KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
        struct kvm_one_reg reg;

        reg.id = csr_cfg->kvm_reg_id;
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            if (errno == EINVAL) {
                csr_cfg->supported = false;
            } else {
                error_report("Unable to read KVM CSR %s: %s",
                             csr_cfg->name, strerror(errno));
                exit(EXIT_FAILURE);
            }
        } else {
            csr_cfg->supported = true;
        }
    }
}

static int uint64_cmp(const void *a, const void *b)
{
    uint64_t val1 = *(const uint64_t *)a;
    uint64_t val2 = *(const uint64_t *)b;

    if (val1 < val2) {
        return -1;
    }

    if (val1 > val2) {
        return 1;
    }

    return 0;
}

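/*
 * Note: the bsearch() lookups below rely on reglist->reg having been
 * sorted with qsort() in kvm_riscv_init_cfg() beforehand.
 */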
static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
                                             struct kvm_reg_list *reglist)
{
    struct kvm_reg_list *reg_search;

    reg_search = bsearch(&kvm_sbi_dbcn.kvm_reg_id, reglist->reg, reglist->n,
                         sizeof(uint64_t), uint64_cmp);

    if (reg_search) {
        kvm_sbi_dbcn.supported = true;
    }
}

static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                 struct kvm_reg_list *reglist)
{
    struct kvm_one_reg reg;
    struct kvm_reg_list *reg_search;
    uint64_t val;
    int ret;

    reg_search = bsearch(&kvm_v_vlenb.kvm_reg_id, reglist->reg, reglist->n,
                         sizeof(uint64_t), uint64_cmp);

    if (reg_search) {
        reg.id = kvm_v_vlenb.kvm_reg_id;
        reg.addr = (uint64_t)&val;

        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_report("Unable to read vlenb register, error code: %d",
                         errno);
            exit(EXIT_FAILURE);
        }

        kvm_v_vlenb.supported = true;
        cpu->cfg.vlenb = val;
    }
}

static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist)
{
    struct kvm_reg_list *reg_search;
    uint64_t reg_id;

    for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
        KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];

        reg_id = csr_cfg->kvm_reg_id;
        reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
                             sizeof(uint64_t), uint64_cmp);
        if (!reg_search) {
            continue;
        }

        csr_cfg->supported = true;
    }
}

static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
    g_autofree struct kvm_reg_list *reglist = NULL;
    KVMCPUConfig *multi_ext_cfg;
    struct kvm_one_reg reg;
    struct kvm_reg_list rl_struct;
    uint64_t val, reg_id, *reg_search;
    int i, ret;

    rl_struct.n = 0;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, &rl_struct);

    /*
     * If KVM_GET_REG_LIST isn't supported we'll get errno 22
     * (EINVAL). Use the legacy read helpers in this case.
     */
    if (errno == EINVAL) {
        kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
        kvm_riscv_read_csr_cfg_legacy(kvmcpu);
        return;
    } else if (errno != E2BIG) {
        /*
         * E2BIG is an expected errno for this API, since we
         * don't know the number of registers upfront. The right
         * amount will be written in rl_struct.n.
         *
         * Error out on any other errno.
         */
        error_report("Error when accessing get-reg-list: %s",
                     strerror(errno));
        exit(EXIT_FAILURE);
    }

    reglist = g_malloc(sizeof(struct kvm_reg_list) +
                       rl_struct.n * sizeof(uint64_t));
    reglist->n = rl_struct.n;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, reglist);
    if (ret) {
        error_report("Error when reading KVM_GET_REG_LIST: %s",
                     strerror(errno));
        exit(EXIT_FAILURE);
    }

    /* sort reglist to use bsearch() */
    qsort(&reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp);

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        multi_ext_cfg = &kvm_multi_ext_cfgs[i];
        reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
                                        multi_ext_cfg->kvm_reg_id);
        reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
                             sizeof(uint64_t), uint64_cmp);
        if (!reg_search) {
            continue;
        }

        reg.id = reg_id;
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_report("Unable to read ISA_EXT KVM register %s: %s",
                         multi_ext_cfg->name, strerror(errno));
            exit(EXIT_FAILURE);
        }

        multi_ext_cfg->supported = true;
        kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
    }

    if (cpu->cfg.ext_zicbom) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
    }

    if (cpu->cfg.ext_zicboz) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
    }

    if (riscv_has_ext(&cpu->env, RVV)) {
        kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
    }

    kvm_riscv_check_sbi_dbcn_support(cpu, reglist);
    kvm_riscv_read_csr_cfg(reglist);
}

static void riscv_init_kvm_registers(Object *cpu_obj)
{
    RISCVCPU *cpu = RISCV_CPU(cpu_obj);
    KVMScratchCPU kvmcpu;

    if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
        return;
    }

    kvm_riscv_init_machine_ids(cpu, &kvmcpu);
    kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
    kvm_riscv_init_cfg(cpu, &kvmcpu);
    kvm_riscv_init_max_satp_mode(cpu, &kvmcpu);

    kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_vector(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state = {
            .mp_state = state
        };

        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to sync MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            return -1;
        }
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_vector(cs);
    if (ret) {
        return ret;
    }

    if (KVM_PUT_RESET_STATE == level) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        if (cs->cpu_index == 0) {
            ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_RUNNABLE);
        } else {
            ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_STOPPED);
        }
        if (ret) {
            return ret;
        }
    }

    return ret;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
{
    CPUState *cs = opaque;

    if (running) {
        kvm_riscv_put_regs_timer(cs);
    } else {
        kvm_riscv_get_regs_timer(cs);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
    target_ulong reg;
    uint64_t id;
    int ret;

    id = RISCV_CONFIG_REG(mvendorid);
    /*
     * cfg.mvendorid is a uint32 but a target_ulong will
     * be written. Assign it to a target_ulong var to avoid
     * writing pieces of other cpu->cfg fields in the reg.
     */
    reg = cpu->cfg.mvendorid;
    ret = kvm_set_one_reg(cs, id, &reg);
    if (ret != 0) {
        return ret;
    }

    id = RISCV_CONFIG_REG(marchid);
    ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
    if (ret != 0) {
        return ret;
    }

    id = RISCV_CONFIG_REG(mimpid);
    ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);

    return ret;
}

static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
{
    target_ulong reg = 1;

    if (!kvm_sbi_dbcn.supported) {
        return 0;
    }

    return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
}

int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
{
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    RISCVCPU *cpu = RISCV_CPU(cs);

    qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);

    if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
        ret = kvm_vcpu_set_machine_ids(cpu, cs);
        if (ret != 0) {
            return ret;
        }
    }

    kvm_riscv_update_cpu_misa_ext(cpu, cs);
    kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);

    ret = kvm_vcpu_enable_sbi_dbcn(cpu, cs);

    return ret;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    /*
     * We can create the VAIA using the newer device control API.
     */
    return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

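/*
 * Handler for the SBI Debug Console (DBCN) extension: forwards console
 * reads/writes between the guest-provided buffer and the first serial
 * chardev (serial_hd(0)).
 */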
static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
{
    g_autofree uint8_t *buf = NULL;
    RISCVCPU *cpu = RISCV_CPU(cs);
    target_ulong num_bytes;
    uint64_t addr;
    unsigned char ch;
    int ret;

    switch (run->riscv_sbi.function_id) {
    case SBI_EXT_DBCN_CONSOLE_READ:
    case SBI_EXT_DBCN_CONSOLE_WRITE:
        num_bytes = run->riscv_sbi.args[0];

        if (num_bytes == 0) {
            run->riscv_sbi.ret[0] = SBI_SUCCESS;
            run->riscv_sbi.ret[1] = 0;
            break;
        }

        addr = run->riscv_sbi.args[1];

        /*
         * Handle the case where a 32 bit CPU is running in a
         * 64 bit addressing env.
         */
        if (riscv_cpu_mxl(&cpu->env) == MXL_RV32) {
            addr |= (uint64_t)run->riscv_sbi.args[2] << 32;
        }

        buf = g_malloc0(num_bytes);

        if (run->riscv_sbi.function_id == SBI_EXT_DBCN_CONSOLE_READ) {
            ret = qemu_chr_fe_read_all(serial_hd(0)->be, buf, num_bytes);
            if (ret < 0) {
                error_report("SBI_EXT_DBCN_CONSOLE_READ: error when "
                             "reading chardev");
                exit(1);
            }

            cpu_physical_memory_write(addr, buf, ret);
        } else {
            cpu_physical_memory_read(addr, buf, num_bytes);

            ret = qemu_chr_fe_write_all(serial_hd(0)->be, buf, num_bytes);
            if (ret < 0) {
                error_report("SBI_EXT_DBCN_CONSOLE_WRITE: error when "
                             "writing chardev");
                exit(1);
            }
        }

        run->riscv_sbi.ret[0] = SBI_SUCCESS;
        run->riscv_sbi.ret[1] = ret;
        break;
    case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
        ch = run->riscv_sbi.args[0];
        ret = qemu_chr_fe_write_all(serial_hd(0)->be, &ch, sizeof(ch));

        if (ret < 0) {
            error_report("SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: error when "
                         "writing chardev");
            exit(1);
        }

        run->riscv_sbi.ret[0] = SBI_SUCCESS;
        run->riscv_sbi.ret[1] = 0;
        break;
    default:
        run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
    }
}

static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;
    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.ret[0] = ch;
        } else {
            run->riscv_sbi.ret[0] = -1;
        }
        ret = 0;
        break;
    case SBI_EXT_DBCN:
        kvm_riscv_handle_sbi_dbcn(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: un-handled SBI EXIT, extension id %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }
    return ret;
}

static int kvm_riscv_handle_csr(CPUState *cs, struct kvm_run *run)
{
    target_ulong csr_num = run->riscv_csr.csr_num;
    target_ulong new_value = run->riscv_csr.new_value;
    target_ulong write_mask = run->riscv_csr.write_mask;
    int ret = 0;

    switch (csr_num) {
    case CSR_SEED:
        run->riscv_csr.ret_value = riscv_new_csr_seed(new_value, write_mask);
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: un-handled CSR EXIT for CSR %lx\n",
                      __func__, csr_num);
        ret = -1;
        break;
    }

    return ret;
}

static bool kvm_riscv_handle_debug(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    if (kvm_find_sw_breakpoint(cs, env->pc)) {
        return true;
    }

    return false;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    case KVM_EXIT_RISCV_CSR:
        ret = kvm_riscv_handle_csr(cs, run);
        break;
    case KVM_EXIT_DEBUG:
        if (kvm_riscv_handle_debug(cs)) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}

void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = 0;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr;          /* a1 */

    kvm_riscv_reset_regs_csr(env);
}

void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}

static int aia_mode;

static const char *kvm_aia_mode_str(uint64_t mode)
{
    switch (mode) {
    case KVM_DEV_RISCV_AIA_MODE_EMUL:
        return "emul";
    case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
        return "hwaccel";
    case KVM_DEV_RISCV_AIA_MODE_AUTO:
    default:
        return "auto";
    };
}

static char *riscv_get_kvm_aia(Object *obj, Error **errp)
{
    return g_strdup(kvm_aia_mode_str(aia_mode));
}

static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
{
    if (!strcmp(val, "emul")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
    } else if (!strcmp(val, "hwaccel")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
    } else if (!strcmp(val, "auto")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
    } else {
        error_setg(errp, "Invalid KVM AIA mode");
        error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
    }
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
                                  riscv_set_kvm_aia);
    object_class_property_set_description(oc, "riscv-aia",
        "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. "
        "Changing KVM AIA modes relies on host support. Defaults to 'auto' "
        "if the host supports it");
    object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
                                    "auto");
}

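/*
 * Creates and configures the in-kernel AIA device: negotiates the AIA
 * mode with the host, programs the APLIC/IMSIC addresses and the index
 * widths (group/hart/guest bits), sets up GSI routing and finally
 * initializes the device.
 */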
kvm_riscv_aia_create(MachineState * machine,uint64_t group_shift,uint64_t aia_irq_num,uint64_t aia_msi_num,uint64_t aplic_base,uint64_t imsic_base,uint64_t guest_num)1808 void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
1809 uint64_t aia_irq_num, uint64_t aia_msi_num,
1810 uint64_t aplic_base, uint64_t imsic_base,
1811 uint64_t guest_num)
1812 {
1813 int ret, i;
1814 int aia_fd = -1;
1815 uint64_t default_aia_mode;
1816 uint64_t socket_count = riscv_socket_count(machine);
1817 uint64_t max_hart_per_socket = 0;
1818 uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
1819 uint64_t socket_bits, hart_bits, guest_bits;
1820 uint64_t max_group_id;
1821
1822 aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
1823
1824 if (aia_fd < 0) {
1825 error_report("Unable to create in-kernel irqchip");
1826 exit(1);
1827 }
1828
1829 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1830 KVM_DEV_RISCV_AIA_CONFIG_MODE,
1831 &default_aia_mode, false, NULL);
1832 if (ret < 0) {
1833 error_report("KVM AIA: failed to get current KVM AIA mode");
1834 exit(1);
1835 }
1836
1837 if (default_aia_mode != aia_mode) {
1838 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1839 KVM_DEV_RISCV_AIA_CONFIG_MODE,
1840 &aia_mode, true, NULL);
1841 if (ret < 0) {
1842 warn_report("KVM AIA: failed to set KVM AIA mode '%s', using "
1843 "default host mode '%s'",
1844 kvm_aia_mode_str(aia_mode),
1845 kvm_aia_mode_str(default_aia_mode));
1846
1847 /* failed to change AIA mode, use default */
1848 aia_mode = default_aia_mode;
1849 }
1850 }
1851
1852 /*
1853 * Skip APLIC creation in KVM if we're running split mode.
1854 * This is done by leaving KVM_DEV_RISCV_AIA_CONFIG_SRCS
1855 * unset. We can also skip KVM_DEV_RISCV_AIA_ADDR_APLIC
1856 * since KVM won't be using it.
1857 */
1858 if (!kvm_kernel_irqchip_split()) {
1859 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1860 KVM_DEV_RISCV_AIA_CONFIG_SRCS,
1861 &aia_irq_num, true, NULL);
1862 if (ret < 0) {
1863 error_report("KVM AIA: failed to set number of input irq lines");
1864 exit(1);
1865 }
1866
1867 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
1868 KVM_DEV_RISCV_AIA_ADDR_APLIC,
1869 &aplic_base, true, NULL);
1870 if (ret < 0) {
1871 error_report("KVM AIA: failed to set the base address of APLIC");
1872 exit(1);
1873 }
1874 }
1875
1876 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1877 KVM_DEV_RISCV_AIA_CONFIG_IDS,
1878 &aia_msi_num, true, NULL);
1879 if (ret < 0) {
1880 error_report("KVM AIA: failed to set number of msi");
1881 exit(1);
1882 }
1883
1884
1885 if (socket_count > 1) {
1886 max_group_id = socket_count - 1;
1887 socket_bits = find_last_bit(&max_group_id, BITS_PER_LONG) + 1;
1888 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1889 KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
1890 &socket_bits, true, NULL);
1891 if (ret < 0) {
1892 error_report("KVM AIA: failed to set group_bits");
1893 exit(1);
1894 }
1895
1896 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1897 KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
1898 &group_shift, true, NULL);
1899 if (ret < 0) {
1900 error_report("KVM AIA: failed to set group_shift");
1901 exit(1);
1902 }
1903 }
1904
1905 guest_bits = guest_num == 0 ? 0 :
1906 find_last_bit(&guest_num, BITS_PER_LONG) + 1;
1907 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1908 KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS,
1909 &guest_bits, true, NULL);
1910 if (ret < 0) {
1911 error_report("KVM AIA: failed to set guest_bits");
1912 exit(1);
1913 }
1914
    for (socket = 0; socket < socket_count; socket++) {
        socket_imsic_base = imsic_base + socket * (1U << group_shift);
        hart_count = riscv_socket_hart_count(machine, socket);
        base_hart = riscv_socket_first_hartid(machine, socket);

        if (max_hart_per_socket < hart_count) {
            max_hart_per_socket = hart_count;
        }

        for (i = 0; i < hart_count; i++) {
            imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits);
            ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
                                    KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart),
                                    &imsic_addr, true, NULL);
            if (ret < 0) {
                error_report("KVM AIA: failed to set the IMSIC address for hart %d", i);
                exit(1);
            }
        }
    }

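    /*
     * hart_bits is the width needed to encode the largest hart index
     * within any socket; zero when every socket has a single hart.
     */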
    if (max_hart_per_socket > 1) {
        max_hart_per_socket--;
        hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
    } else {
        hart_bits = 0;
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                            KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
                            &hart_bits, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to set hart_bits");
        exit(1);
    }

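    /*
     * Install an identity GSI-to-pin routing table so the generic
     * GSI/irqfd APIs can target APLIC input lines directly.
     */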
    if (kvm_has_gsi_routing()) {
        for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) {
            /* KVM AIA only has one APLIC instance */
            kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx);
        }
        kvm_gsi_routing_allowed = true;
        kvm_irqchip_commit_routes(kvm_state);
    }

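    /*
     * Initialize the AIA device only after all CONFIG/ADDR attributes
     * are in place: CTRL_INIT finalizes the configuration above and
     * activates the in-kernel AIA.
     */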
    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
                            KVM_DEV_RISCV_AIA_CTRL_INIT,
                            NULL, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to initialize");
        exit(1);
    }

    kvm_msi_via_irqfd_allowed = true;
}

static void kvm_cpu_instance_init(CPUState *cs)
{
    Object *obj = OBJECT(RISCV_CPU(cs));

    riscv_init_kvm_registers(obj);

    kvm_riscv_add_cpu_user_properties(obj);
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> kvm_cpu_realize() (via accel_cpu_common_realize())
 */
static bool kvm_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    int ret;

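    /*
     * The host kernel leaves the vector unit disabled for a thread
     * until userspace opts in via prctl(); turn it on here so this
     * vCPU thread is allowed to carry RVV state.
     */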
    if (riscv_has_ext(&cpu->env, RVV)) {
        ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
        if (ret) {
            error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s",
                       strerrorname_np(errno));
            return false;
        }
    }

    return true;
}

void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    KVMScratchCPU kvmcpu;
    struct kvm_one_reg reg;
    uint64_t val;
    int ret;

    /* short-circuit without spinning the scratch CPU */
    if (!cpu->cfg.ext_zicbom && !cpu->cfg.ext_zicboz &&
        !riscv_has_ext(env, RVV)) {
        return;
    }

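    /*
     * Host-dependent values (cache-block sizes, vlenb) can only be
     * read from a live vCPU, so spin up a disposable scratch vCPU and
     * check the user-requested values against what the host provides.
     */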
    if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
        error_setg(errp, "Unable to create scratch KVM cpu");
        return;
    }

    if (cpu->cfg.ext_zicbom &&
        riscv_cpu_option_set(kvm_cbom_blocksize.name)) {

        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
                                        kvm_cbom_blocksize.kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read cbom_blocksize, error %d", errno);
            return;
        }

        if (cpu->cfg.cbom_blocksize != val) {
            error_setg(errp, "Unable to set cbom_blocksize to a different "
                       "value than the host (%lu)", val);
            return;
        }
    }

    if (cpu->cfg.ext_zicboz &&
        riscv_cpu_option_set(kvm_cboz_blocksize.name)) {

        reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
                                        kvm_cboz_blocksize.kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read cboz_blocksize, error %d", errno);
            return;
        }

        if (cpu->cfg.cboz_blocksize != val) {
            error_setg(errp, "Unable to set cboz_blocksize to a different "
                       "value than the host (%lu)", val);
            return;
        }
    }

    /*
     * Users set 'vlen' (in bits); the KVM register is 'vlenb'
     * (in bytes), i.e. vlen / 8, which is why the mismatch error
     * below reports val * 8.
     */
    if (riscv_has_ext(env, RVV) && riscv_cpu_option_set("vlen")) {
        if (!kvm_v_vlenb.supported) {
            error_setg(errp, "Unable to set 'vlenb': register not supported");
            return;
        }

        reg.id = kvm_v_vlenb.kvm_reg_id;
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read vlenb register, error %d", errno);
            return;
        }

        if (cpu->cfg.vlenb != val) {
            error_setg(errp, "Unable to set 'vlen' to a different "
                       "value than the host (%lu)", val * 8);
            return;
        }
    }

    kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}

static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_instance_init = kvm_cpu_instance_init;
    acc->cpu_target_realize = kvm_cpu_realize;
}

static const TypeInfo kvm_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("kvm"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = kvm_cpu_accel_class_init,
    .abstract = true,
};

static void kvm_cpu_accel_register_types(void)
{
    type_register_static(&kvm_cpu_accel_type_info);
}
type_init(kvm_cpu_accel_register_types);

static const TypeInfo riscv_kvm_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU_HOST,
        .parent = TYPE_RISCV_CPU,
#if defined(TARGET_RISCV32)
        .class_data = &(const RISCVCPUDef) {
            .misa_mxl_max = MXL_RV32,
            .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
            .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
            .cfg.max_satp_mode = -1,
        },
#elif defined(TARGET_RISCV64)
        .class_data = &(const RISCVCPUDef) {
            .misa_mxl_max = MXL_RV64,
            .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
            .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
            .cfg.max_satp_mode = -1,
        },
#endif
    }
};

DEFINE_TYPES(riscv_kvm_cpu_type_infos)

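/*
 * RISC-V encodes instruction length in the low bits of the first
 * 16-bit parcel: bits [1:0] == 0b11 means a 32-bit instruction
 * (standard ebreak), anything else a 16-bit compressed one (c.ebreak).
 */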
static const uint32_t ebreak_insn = 0x00100073;
static const uint16_t c_ebreak_insn = 0x9002;

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 0)) {
        return -EINVAL;
    }

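    /*
     * The 2-byte read above tells us the instruction's length: for a
     * 32-bit instruction, re-read the full 4 bytes before overwriting
     * it with ebreak; otherwise plant the 2-byte c.ebreak.
     */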
    if ((bp->saved_insn & 0x3) == 0x3) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0)
            || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak_insn, 4, 1)) {
            return -EINVAL;
        }
    } else {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak_insn, 2, 1)) {
            return -EINVAL;
        }
    }

    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint32_t ebreak;
    uint16_t c_ebreak;

    if ((bp->saved_insn & 0x3) == 0x3) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak, 4, 0) ||
            ebreak != ebreak_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
    } else {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak, 2, 0) ||
            c_ebreak != c_ebreak_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 1)) {
            return -EINVAL;
        }
    }

    return 0;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    /* TODO: to be implemented later. */
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    /* TODO: to be implemented later. */
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    /* TODO: to be implemented later. */
}

void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE;
    }
}