1 /*
2 * RISC-V implementation of KVM hooks
3 *
4 * Copyright (c) 2020 Huawei Technologies Co., Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include <sys/ioctl.h>
21 #include <sys/prctl.h>
22
23 #include <linux/kvm.h>
24
25 #include "qemu/timer.h"
26 #include "qapi/error.h"
27 #include "qemu/error-report.h"
28 #include "qemu/main-loop.h"
29 #include "qapi/visitor.h"
30 #include "system/system.h"
31 #include "system/kvm.h"
32 #include "system/kvm_int.h"
33 #include "cpu.h"
34 #include "trace.h"
35 #include "accel/accel-cpu-target.h"
36 #include "hw/pci/pci.h"
37 #include "exec/memattrs.h"
38 #include "exec/address-spaces.h"
39 #include "hw/boards.h"
40 #include "hw/irq.h"
41 #include "hw/intc/riscv_imsic.h"
42 #include "qemu/log.h"
43 #include "hw/loader.h"
44 #include "kvm_riscv.h"
45 #include "sbi_ecall_interface.h"
46 #include "chardev/char-fe.h"
47 #include "migration/misc.h"
48 #include "system/runstate.h"
49 #include "hw/riscv/numa.h"
50
51 #define PR_RISCV_V_SET_CONTROL 69
52 #define PR_RISCV_V_VSTATE_CTRL_ON 2
53
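/*
 * Forward a wired interrupt level change to the in-kernel irqchip.
 * Used as the qemu_irq handler by the APLIC model when the irqchip
 * is provided by KVM.
 */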
void riscv_kvm_aplic_request(void *opaque, int irq, int level)
55 {
56 kvm_set_irq(kvm_state, irq, !!level);
57 }
58
59 static bool cap_has_mp_state;
60
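/*
 * KVM "one reg" IDs encode the architecture, the register size and the
 * register type/index. KVM_RISCV_REG_ID_ULONG selects the target_ulong
 * width: U64 on riscv64, U32 on riscv32.
 */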
61 #define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \
62 type | idx)
63
64 #define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \
65 type | idx)
66
67 #if defined(TARGET_RISCV64)
68 #define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx)
69 #else
70 #define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx)
71 #endif
72
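/* Encode log2(size_b) into the KVM_REG_SIZE field of a reg ID. */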
static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
74 {
75 uint64_t size_ctz = __builtin_ctz(size_b);
76
77 return id | (size_ctz << KVM_REG_SIZE_SHIFT);
78 }
79
static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
                                        uint64_t idx)
82 {
83 uint64_t id;
84 size_t size_b;
85
86 g_assert(idx < 32);
87
88 id = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(idx);
89 size_b = cpu->cfg.vlenb;
90
91 return kvm_encode_reg_size_id(id, size_b);
92 }
93
94 #define RISCV_CORE_REG(name) \
95 KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \
96 KVM_REG_RISCV_CORE_REG(name))
97
98 #define RISCV_CSR_REG(name) \
99 KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \
100 KVM_REG_RISCV_CSR_REG(name))
101
102 #define RISCV_CONFIG_REG(name) \
103 KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \
104 KVM_REG_RISCV_CONFIG_REG(name))
105
106 #define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \
107 KVM_REG_RISCV_TIMER_REG(name))
108
109 #define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx)
110
111 #define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx)
112
113 #define RISCV_VECTOR_CSR_REG(name) \
114 KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \
115 KVM_REG_RISCV_VECTOR_CSR_REG(name))
116
117 #define KVM_RISCV_GET_TIMER(cs, name, reg) \
118 do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
120 if (ret) { \
121 abort(); \
122 } \
123 } while (0)
124
125 #define KVM_RISCV_SET_TIMER(cs, name, reg) \
126 do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
128 if (ret) { \
129 abort(); \
130 } \
131 } while (0)
132
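/*
 * Describes one KVM-tunable CPU option (MISA bit, multi-letter ISA
 * extension, CSR or config register): 'offset' locates the QEMU-side
 * field, 'kvm_reg_id' is the matching KVM register, 'user_set' records
 * whether the user changed the option on the command line.
 */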
133 typedef struct KVMCPUConfig {
134 const char *name;
135 const char *description;
136 target_ulong offset;
137 uint64_t kvm_reg_id;
138 bool user_set;
139 bool supported;
140 } KVMCPUConfig;
141
142 #define KVM_MISA_CFG(_bit, _reg_id) \
143 {.offset = _bit, .kvm_reg_id = _reg_id}
144
145 /* KVM ISA extensions */
146 static KVMCPUConfig kvm_misa_ext_cfgs[] = {
147 KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
148 KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
149 KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
150 KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
151 KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
152 KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
153 KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
154 KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V),
155 };
156
static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
160 {
161 KVMCPUConfig *misa_ext_cfg = opaque;
162 target_ulong misa_bit = misa_ext_cfg->offset;
163 RISCVCPU *cpu = RISCV_CPU(obj);
164 CPURISCVState *env = &cpu->env;
165 bool value = env->misa_ext_mask & misa_bit;
166
167 visit_type_bool(v, name, &value, errp);
168 }
169
static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
173 {
174 KVMCPUConfig *misa_ext_cfg = opaque;
175 target_ulong misa_bit = misa_ext_cfg->offset;
176 RISCVCPU *cpu = RISCV_CPU(obj);
177 CPURISCVState *env = &cpu->env;
178 bool value, host_bit;
179
180 if (!visit_type_bool(v, name, &value, errp)) {
181 return;
182 }
183
184 host_bit = env->misa_ext_mask & misa_bit;
185
186 if (value == host_bit) {
187 return;
188 }
189
190 if (!value) {
191 misa_ext_cfg->user_set = true;
192 return;
193 }
194
    /*
     * Forbid users from enabling extensions that are not
     * available in the host hart.
     */
199 error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
200 "enabled in the host", misa_ext_cfg->name);
201 }
202
static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
204 {
205 CPURISCVState *env = &cpu->env;
206 uint64_t id, reg;
207 int i, ret;
208
209 for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
210 KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
211 target_ulong misa_bit = misa_cfg->offset;
212
213 if (!misa_cfg->user_set) {
214 continue;
215 }
216
217 /* If we're here we're going to disable the MISA bit */
218 reg = 0;
219 id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
220 misa_cfg->kvm_reg_id);
        ret = kvm_set_one_reg(cs, id, &reg);
222 if (ret != 0) {
223 /*
224 * We're not checking for -EINVAL because if the bit is about
225 * to be disabled, it means that it was already enabled by
226 * KVM. We determined that by fetching the 'isa' register
227 * during init() time. Any error at this point is worth
228 * aborting.
229 */
230 error_report("Unable to set KVM reg %s, error %d",
231 misa_cfg->name, ret);
232 exit(EXIT_FAILURE);
233 }
234 env->misa_ext &= ~misa_bit;
235 }
236 }
237
238 #define KVM_CSR_CFG(_name, _env_prop, reg_id) \
239 {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \
240 .kvm_reg_id = reg_id}
241
242 static KVMCPUConfig kvm_csr_cfgs[] = {
243 KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)),
244 KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)),
245 KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)),
246 KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)),
247 KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)),
248 KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)),
249 KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)),
250 KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)),
251 KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)),
252 };
253
static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
255 {
256 return (void *)&cpu->env + csr_cfg->offset;
257 }
258
static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
260 {
261 uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
262 return *val32;
263 }
264
static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
266 {
267 uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
268 return *val64;
269 }
270
static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
                                uint32_t val)
273 {
274 uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
275 *val32 = val;
276 }
277
static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
                                uint64_t val)
280 {
281 uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
282 *val64 = val;
283 }
284
285 #define KVM_EXT_CFG(_name, _prop, _reg_id) \
286 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
287 .kvm_reg_id = _reg_id}
288
289 static KVMCPUConfig kvm_multi_ext_cfgs[] = {
290 KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
291 KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
292 KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
293 KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
294 KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
295 KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
296 KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
297 KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
298 KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
299 KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
300 KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
301 KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
302 KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
303 KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
304 KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
305 KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
306 KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
307 KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
308 KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
309 KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
310 KVM_EXT_CFG("zbc", ext_zbc, KVM_RISCV_ISA_EXT_ZBC),
311 KVM_EXT_CFG("zbkb", ext_zbkb, KVM_RISCV_ISA_EXT_ZBKB),
312 KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
313 KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
314 KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
315 KVM_EXT_CFG("zca", ext_zca, KVM_RISCV_ISA_EXT_ZCA),
316 KVM_EXT_CFG("zcb", ext_zcb, KVM_RISCV_ISA_EXT_ZCB),
317 KVM_EXT_CFG("zcd", ext_zcd, KVM_RISCV_ISA_EXT_ZCD),
318 KVM_EXT_CFG("zcf", ext_zcf, KVM_RISCV_ISA_EXT_ZCF),
319 KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
320 KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
321 KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
322 KVM_EXT_CFG("zkr", ext_zkr, KVM_RISCV_ISA_EXT_ZKR),
323 KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED),
324 KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH),
325 KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT),
326 KVM_EXT_CFG("ztso", ext_ztso, KVM_RISCV_ISA_EXT_ZTSO),
327 KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB),
328 KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC),
329 KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH),
330 KVM_EXT_CFG("zvfhmin", ext_zvfhmin, KVM_RISCV_ISA_EXT_ZVFHMIN),
331 KVM_EXT_CFG("zvkb", ext_zvkb, KVM_RISCV_ISA_EXT_ZVKB),
332 KVM_EXT_CFG("zvkg", ext_zvkg, KVM_RISCV_ISA_EXT_ZVKG),
333 KVM_EXT_CFG("zvkned", ext_zvkned, KVM_RISCV_ISA_EXT_ZVKNED),
334 KVM_EXT_CFG("zvknha", ext_zvknha, KVM_RISCV_ISA_EXT_ZVKNHA),
335 KVM_EXT_CFG("zvknhb", ext_zvknhb, KVM_RISCV_ISA_EXT_ZVKNHB),
336 KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
337 KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
338 KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
339 KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
340 KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
341 KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
342 KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
343 KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
344 KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
345 KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
346 KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
347 KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
348 KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
349 KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
350 KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
351 };
352
static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
354 {
355 return (void *)&cpu->cfg + kvmcfg->offset;
356 }
357
static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext,
                            uint32_t val)
360 {
361 bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
362
363 *ext_enabled = val;
364 }
365
static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
                                KVMCPUConfig *multi_ext)
368 {
369 bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
370
371 return *ext_enabled;
372 }
373
static void kvm_cpu_get_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
377 {
378 KVMCPUConfig *multi_ext_cfg = opaque;
379 RISCVCPU *cpu = RISCV_CPU(obj);
380 bool value = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
381
382 visit_type_bool(v, name, &value, errp);
383 }
384
static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
388 {
389 KVMCPUConfig *multi_ext_cfg = opaque;
390 RISCVCPU *cpu = RISCV_CPU(obj);
391 bool value, host_val;
392
393 if (!visit_type_bool(v, name, &value, errp)) {
394 return;
395 }
396
397 host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
398
399 /*
400 * Ignore if the user is setting the same value
401 * as the host.
402 */
403 if (value == host_val) {
404 return;
405 }
406
407 if (!multi_ext_cfg->supported) {
408 /*
409 * Error out if the user is trying to enable an
410 * extension that KVM doesn't support. Ignore
411 * option otherwise.
412 */
413 if (value) {
414 error_setg(errp, "KVM does not support disabling extension %s",
415 multi_ext_cfg->name);
416 }
417
418 return;
419 }
420
421 multi_ext_cfg->user_set = true;
422 kvm_cpu_cfg_set(cpu, multi_ext_cfg, value);
423 }
424
425 static KVMCPUConfig kvm_cbom_blocksize = {
426 .name = "cbom_blocksize",
427 .offset = CPU_CFG_OFFSET(cbom_blocksize),
428 .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
429 };
430
431 static KVMCPUConfig kvm_cboz_blocksize = {
432 .name = "cboz_blocksize",
433 .offset = CPU_CFG_OFFSET(cboz_blocksize),
434 .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
435 };
436
437 static KVMCPUConfig kvm_v_vlenb = {
438 .name = "vlenb",
439 .offset = CPU_CFG_OFFSET(vlenb),
440 .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR |
441 KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)
442 };
443
444 static KVMCPUConfig kvm_sbi_dbcn = {
445 .name = "sbi_dbcn",
446 .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
447 KVM_REG_RISCV_SBI_EXT | KVM_RISCV_SBI_EXT_DBCN
448 };
449
static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
451 {
452 uint64_t id, reg;
453 int i, ret;
454
455 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
456 KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
457
458 if (!multi_ext_cfg->user_set) {
459 continue;
460 }
461
462 id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
463 multi_ext_cfg->kvm_reg_id);
464 reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
        ret = kvm_set_one_reg(cs, id, &reg);
466 if (ret != 0) {
467 if (!reg && ret == -EINVAL) {
468 warn_report("KVM cannot disable extension %s",
469 multi_ext_cfg->name);
470 } else {
471 error_report("Unable to enable extension %s in KVM, error %d",
472 multi_ext_cfg->name, ret);
473 exit(EXIT_FAILURE);
474 }
475 }
476 }
477 }
478
static void cpu_get_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
482 {
483 bool value = false;
484
485 visit_type_bool(v, name, &value, errp);
486 }
487
static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
491 {
492 const char *propname = opaque;
493 bool value;
494
495 if (!visit_type_bool(v, name, &value, errp)) {
496 return;
497 }
498
499 if (value) {
500 error_setg(errp, "'%s' is not available with KVM",
501 propname);
502 }
503 }
504
static void riscv_cpu_add_kvm_unavail_prop(Object *obj, const char *prop_name)
506 {
507 /* Check if KVM created the property already */
508 if (object_property_find(obj, prop_name)) {
509 return;
510 }
511
512 /*
513 * Set the default to disabled for every extension
514 * unknown to KVM and error out if the user attempts
515 * to enable any of them.
516 */
517 object_property_add(obj, prop_name, "bool",
518 cpu_get_cfg_unavailable,
519 cpu_set_cfg_unavailable,
520 NULL, (void *)prop_name);
521 }
522
static void riscv_cpu_add_kvm_unavail_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
525 {
526 const RISCVCPUMultiExtConfig *prop;
527
528 g_assert(array);
529
530 for (prop = array; prop && prop->name; prop++) {
531 riscv_cpu_add_kvm_unavail_prop(obj, prop->name);
532 }
533 }
534
static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
536 {
537 int i;
538
539 riscv_add_satp_mode_properties(cpu_obj);
540
541 for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
542 KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
543 int bit = misa_cfg->offset;
544
545 misa_cfg->name = riscv_get_misa_ext_name(bit);
546 misa_cfg->description = riscv_get_misa_ext_description(bit);
547
548 object_property_add(cpu_obj, misa_cfg->name, "bool",
549 kvm_cpu_get_misa_ext_cfg,
550 kvm_cpu_set_misa_ext_cfg,
551 NULL, misa_cfg);
552 object_property_set_description(cpu_obj, misa_cfg->name,
553 misa_cfg->description);
554 }
555
556 for (i = 0; misa_bits[i] != 0; i++) {
557 const char *ext_name = riscv_get_misa_ext_name(misa_bits[i]);
558 riscv_cpu_add_kvm_unavail_prop(cpu_obj, ext_name);
559 }
560
561 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
562 KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];
563
564 object_property_add(cpu_obj, multi_cfg->name, "bool",
565 kvm_cpu_get_multi_ext_cfg,
566 kvm_cpu_set_multi_ext_cfg,
567 NULL, multi_cfg);
568 }
569
570 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions);
571 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts);
572 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts);
573
574 /* We don't have the needed KVM support for profiles */
575 for (i = 0; riscv_profiles[i] != NULL; i++) {
576 riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name);
577 }
578 }
579
static int kvm_riscv_get_regs_core(CPUState *cs)
581 {
582 int ret = 0;
583 int i;
584 target_ulong reg;
585 CPURISCVState *env = &RISCV_CPU(cs)->env;
586
    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
588 if (ret) {
589 return ret;
590 }
591 env->pc = reg;
592
593 for (i = 1; i < 32; i++) {
594 uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
596 if (ret) {
597 return ret;
598 }
599 env->gpr[i] = reg;
600 }
601
602 return ret;
603 }
604
static int kvm_riscv_put_regs_core(CPUState *cs)
606 {
607 int ret = 0;
608 int i;
609 target_ulong reg;
610 CPURISCVState *env = &RISCV_CPU(cs)->env;
611
612 reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
614 if (ret) {
615 return ret;
616 }
617
618 for (i = 1; i < 32; i++) {
619 uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
620 reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
622 if (ret) {
623 return ret;
624 }
625 }
626
627 return ret;
628 }
629
static int kvm_riscv_get_regs_csr(CPUState *cs)
631 {
632 RISCVCPU *cpu = RISCV_CPU(cs);
633 uint64_t reg;
634 int i, ret;
635
636 for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
637 KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
638
639 if (!csr_cfg->supported) {
640 continue;
641 }
642
        ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
644 if (ret) {
645 return ret;
646 }
647
648 if (KVM_REG_SIZE(csr_cfg->kvm_reg_id) == sizeof(uint32_t)) {
649 kvm_cpu_csr_set_u32(cpu, csr_cfg, reg);
650 } else if (KVM_REG_SIZE(csr_cfg->kvm_reg_id) == sizeof(uint64_t)) {
651 kvm_cpu_csr_set_u64(cpu, csr_cfg, reg);
652 } else {
653 g_assert_not_reached();
654 }
655 }
656
657 return 0;
658 }
659
static int kvm_riscv_put_regs_csr(CPUState *cs)
661 {
662 RISCVCPU *cpu = RISCV_CPU(cs);
663 uint64_t reg;
664 int i, ret;
665
666 for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
667 KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
668
669 if (!csr_cfg->supported) {
670 continue;
671 }
672
673 if (KVM_REG_SIZE(csr_cfg->kvm_reg_id) == sizeof(uint32_t)) {
674 reg = kvm_cpu_csr_get_u32(cpu, csr_cfg);
675 } else if (KVM_REG_SIZE(csr_cfg->kvm_reg_id) == sizeof(uint64_t)) {
676 reg = kvm_cpu_csr_get_u64(cpu, csr_cfg);
677 } else {
678 g_assert_not_reached();
679 }
680
        ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
682 if (ret) {
683 return ret;
684 }
685 }
686
687 return 0;
688 }
689
static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
691 {
692 env->mstatus = 0;
693 env->mie = 0;
694 env->stvec = 0;
695 env->sscratch = 0;
696 env->sepc = 0;
697 env->scause = 0;
698 env->stval = 0;
699 env->mip = 0;
700 env->satp = 0;
701 }
702
static int kvm_riscv_get_regs_fp(CPUState *cs)
704 {
705 int ret = 0;
706 int i;
707 CPURISCVState *env = &RISCV_CPU(cs)->env;
708
709 if (riscv_has_ext(env, RVD)) {
710 uint64_t reg;
711 for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), &reg);
713 if (ret) {
714 return ret;
715 }
716 env->fpr[i] = reg;
717 }
718 return ret;
719 }
720
721 if (riscv_has_ext(env, RVF)) {
722 uint32_t reg;
723 for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), &reg);
725 if (ret) {
726 return ret;
727 }
728 env->fpr[i] = reg;
729 }
730 return ret;
731 }
732
733 return ret;
734 }
735
static int kvm_riscv_put_regs_fp(CPUState *cs)
737 {
738 int ret = 0;
739 int i;
740 CPURISCVState *env = &RISCV_CPU(cs)->env;
741
742 if (riscv_has_ext(env, RVD)) {
743 uint64_t reg;
744 for (i = 0; i < 32; i++) {
745 reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), &reg);
747 if (ret) {
748 return ret;
749 }
750 }
751 return ret;
752 }
753
754 if (riscv_has_ext(env, RVF)) {
755 uint32_t reg;
756 for (i = 0; i < 32; i++) {
757 reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), &reg);
759 if (ret) {
760 return ret;
761 }
762 }
763 return ret;
764 }
765
766 return ret;
767 }
768
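/*
 * Timer state is read from KVM when the VM stops and written back when
 * it resumes (see kvm_riscv_vm_state_change()); kvm_timer_dirty avoids
 * fetching it twice.
 */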
static void kvm_riscv_get_regs_timer(CPUState *cs)
770 {
771 CPURISCVState *env = &RISCV_CPU(cs)->env;
772
773 if (env->kvm_timer_dirty) {
774 return;
775 }
776
777 KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time);
778 KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare);
779 KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state);
780 KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency);
781
782 env->kvm_timer_dirty = true;
783 }
784
static void kvm_riscv_put_regs_timer(CPUState *cs)
786 {
787 uint64_t reg;
788 CPURISCVState *env = &RISCV_CPU(cs)->env;
789
790 if (!env->kvm_timer_dirty) {
791 return;
792 }
793
794 KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time);
795 KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare);
796
    /*
     * Setting RISCV_TIMER_REG(state) while env->kvm_timer_state == 0
     * makes KVM return an error. Ideally KVM would handle this, but
     * working around it in QEMU is harmless for now.
     * TODO: adapt here if KVM changes.
     */
803 if (env->kvm_timer_state) {
804 KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state);
805 }
806
    /*
     * Migration currently does not work between hosts with different
     * timer frequencies, so check that source and destination match
     * while a migration is running.
     */
812 if (migration_is_running()) {
813 KVM_RISCV_GET_TIMER(cs, frequency, reg);
814 if (reg != env->kvm_timer_frequency) {
815 error_report("Dst Hosts timer frequency != Src Hosts");
816 }
817 }
818
819 env->kvm_timer_dirty = false;
820 }
821
uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
823 {
824 uint64_t reg;
825
826 KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);
827
828 return reg;
829 }
830
static int kvm_riscv_get_regs_vector(CPUState *cs)
832 {
833 RISCVCPU *cpu = RISCV_CPU(cs);
834 CPURISCVState *env = &cpu->env;
835 target_ulong reg;
836 uint64_t vreg_id;
837 int vreg_idx, ret = 0;
838
839 if (!riscv_has_ext(env, RVV)) {
840 return 0;
841 }
842
    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
844 if (ret) {
845 return ret;
846 }
847 env->vstart = reg;
848
    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
850 if (ret) {
851 return ret;
852 }
853 env->vl = reg;
854
    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
856 if (ret) {
857 return ret;
858 }
859 env->vtype = reg;
860
861 if (kvm_v_vlenb.supported) {
        ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
863 if (ret) {
864 return ret;
865 }
866 cpu->cfg.vlenb = reg;
867
868 for (int i = 0; i < 32; i++) {
869 /*
870 * vreg[] is statically allocated using RV_VLEN_MAX.
871 * Use it instead of vlenb to calculate vreg_idx for
872 * simplicity.
873 */
874 vreg_idx = i * RV_VLEN_MAX / 64;
875 vreg_id = kvm_riscv_vector_reg_id(cpu, i);
876
877 ret = kvm_get_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
878 if (ret) {
879 return ret;
880 }
881 }
882 }
883
884 return 0;
885 }
886
static int kvm_riscv_put_regs_vector(CPUState *cs)
888 {
889 RISCVCPU *cpu = RISCV_CPU(cs);
890 CPURISCVState *env = &cpu->env;
891 target_ulong reg;
892 uint64_t vreg_id;
893 int vreg_idx, ret = 0;
894
895 if (!riscv_has_ext(env, RVV)) {
896 return 0;
897 }
898
899 reg = env->vstart;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
901 if (ret) {
902 return ret;
903 }
904
905 reg = env->vl;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
907 if (ret) {
908 return ret;
909 }
910
911 reg = env->vtype;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
913 if (ret) {
914 return ret;
915 }
916
917 if (kvm_v_vlenb.supported) {
918 reg = cpu->cfg.vlenb;
        ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
920
921 for (int i = 0; i < 32; i++) {
922 /*
923 * vreg[] is statically allocated using RV_VLEN_MAX.
924 * Use it instead of vlenb to calculate vreg_idx for
925 * simplicity.
926 */
927 vreg_idx = i * RV_VLEN_MAX / 64;
928 vreg_id = kvm_riscv_vector_reg_id(cpu, i);
929
930 ret = kvm_set_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
931 if (ret) {
932 return ret;
933 }
934 }
935 }
936
937 return ret;
938 }
939
940 typedef struct KVMScratchCPU {
941 int kvmfd;
942 int vmfd;
943 int cpufd;
944 } KVMScratchCPU;
945
946 /*
947 * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
948 * from target/arm/kvm.c.
949 */
static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch)
951 {
952 int kvmfd = -1, vmfd = -1, cpufd = -1;
953
954 kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
955 if (kvmfd < 0) {
956 goto err;
957 }
958 do {
959 vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
960 } while (vmfd == -1 && errno == EINTR);
961 if (vmfd < 0) {
962 goto err;
963 }
964 cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
965 if (cpufd < 0) {
966 goto err;
967 }
968
969 scratch->kvmfd = kvmfd;
970 scratch->vmfd = vmfd;
971 scratch->cpufd = cpufd;
972
973 return true;
974
975 err:
976 if (cpufd >= 0) {
977 close(cpufd);
978 }
979 if (vmfd >= 0) {
980 close(vmfd);
981 }
982 if (kvmfd >= 0) {
983 close(kvmfd);
984 }
985
986 return false;
987 }
988
static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
990 {
991 close(scratch->cpufd);
992 close(scratch->vmfd);
993 close(scratch->kvmfd);
994 }
995
static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
997 {
998 struct kvm_one_reg reg;
999 int ret;
1000
1001 reg.id = RISCV_CONFIG_REG(mvendorid);
1002 reg.addr = (uint64_t)&cpu->cfg.mvendorid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1004 if (ret != 0) {
1005 error_report("Unable to retrieve mvendorid from host, error %d", ret);
1006 }
1007
1008 reg.id = RISCV_CONFIG_REG(marchid);
1009 reg.addr = (uint64_t)&cpu->cfg.marchid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1011 if (ret != 0) {
1012 error_report("Unable to retrieve marchid from host, error %d", ret);
1013 }
1014
1015 reg.id = RISCV_CONFIG_REG(mimpid);
1016 reg.addr = (uint64_t)&cpu->cfg.mimpid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1018 if (ret != 0) {
1019 error_report("Unable to retrieve mimpid from host, error %d", ret);
1020 }
1021 }
1022
static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
                                         KVMScratchCPU *kvmcpu)
1025 {
1026 CPURISCVState *env = &cpu->env;
1027 struct kvm_one_reg reg;
1028 int ret;
1029
1030 reg.id = RISCV_CONFIG_REG(isa);
1031 reg.addr = (uint64_t)&env->misa_ext_mask;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1033
1034 if (ret) {
1035 error_report("Unable to fetch ISA register from KVM, "
1036 "error %d", ret);
1037 kvm_riscv_destroy_scratch_vcpu(kvmcpu);
1038 exit(EXIT_FAILURE);
1039 }
1040
1041 env->misa_ext = env->misa_ext_mask;
1042 }
1043
static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                         KVMCPUConfig *cbomz_cfg)
1046 {
1047 struct kvm_one_reg reg;
1048 int ret;
1049
1050 reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
1051 cbomz_cfg->kvm_reg_id);
1052 reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1054 if (ret != 0) {
1055 error_report("Unable to read KVM reg %s, error %d",
1056 cbomz_cfg->name, ret);
1057 exit(EXIT_FAILURE);
1058 }
1059 }
1060
static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
                                           KVMScratchCPU *kvmcpu)
1063 {
1064 uint64_t val;
1065 int i, ret;
1066
1067 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
1068 KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
1069 struct kvm_one_reg reg;
1070
1071 reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
1072 multi_ext_cfg->kvm_reg_id);
1073 reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1075 if (ret != 0) {
1076 if (errno == EINVAL) {
1077 /* Silently default to 'false' if KVM does not support it. */
1078 multi_ext_cfg->supported = false;
1079 val = false;
1080 } else {
1081 error_report("Unable to read ISA_EXT KVM register %s: %s",
1082 multi_ext_cfg->name, strerror(errno));
1083 exit(EXIT_FAILURE);
1084 }
1085 } else {
1086 multi_ext_cfg->supported = true;
1087 }
1088
1089 kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
1090 }
1091
1092 if (cpu->cfg.ext_zicbom) {
1093 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
1094 }
1095
1096 if (cpu->cfg.ext_zicboz) {
1097 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
1098 }
1099 }
1100
static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu)
1102 {
1103 uint64_t val;
1104 int i, ret;
1105
1106 for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
1107 KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
1108 struct kvm_one_reg reg;
1109
1110 reg.id = csr_cfg->kvm_reg_id;
1111 reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1113 if (ret != 0) {
1114 if (errno == EINVAL) {
1115 csr_cfg->supported = false;
1116 } else {
1117 error_report("Unable to read KVM CSR %s: %s",
1118 csr_cfg->name, strerror(errno));
1119 exit(EXIT_FAILURE);
1120 }
1121 } else {
1122 csr_cfg->supported = true;
1123 }
1124 }
1125 }
1126
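/* Comparator for sorting and searching the KVM reg ID list. */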
static int uint64_cmp(const void *a, const void *b)
1128 {
1129 uint64_t val1 = *(const uint64_t *)a;
1130 uint64_t val2 = *(const uint64_t *)b;
1131
1132 if (val1 < val2) {
1133 return -1;
1134 }
1135
1136 if (val1 > val2) {
1137 return 1;
1138 }
1139
1140 return 0;
1141 }
1142
static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
                                             struct kvm_reg_list *reglist)
1145 {
1146 struct kvm_reg_list *reg_search;
1147
1148 reg_search = bsearch(&kvm_sbi_dbcn.kvm_reg_id, reglist->reg, reglist->n,
1149 sizeof(uint64_t), uint64_cmp);
1150
1151 if (reg_search) {
1152 kvm_sbi_dbcn.supported = true;
1153 }
1154 }
1155
static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                 struct kvm_reg_list *reglist)
1158 {
1159 struct kvm_one_reg reg;
1160 struct kvm_reg_list *reg_search;
1161 uint64_t val;
1162 int ret;
1163
1164 reg_search = bsearch(&kvm_v_vlenb.kvm_reg_id, reglist->reg, reglist->n,
1165 sizeof(uint64_t), uint64_cmp);
1166
1167 if (reg_search) {
1168 reg.id = kvm_v_vlenb.kvm_reg_id;
1169 reg.addr = (uint64_t)&val;
1170
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1172 if (ret != 0) {
1173 error_report("Unable to read vlenb register, error code: %d",
1174 errno);
1175 exit(EXIT_FAILURE);
1176 }
1177
1178 kvm_v_vlenb.supported = true;
1179 cpu->cfg.vlenb = val;
1180 }
1181 }
1182
static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist)
1184 {
1185 struct kvm_reg_list *reg_search;
1186 uint64_t reg_id;
1187
1188 for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
1189 KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
1190
1191 reg_id = csr_cfg->kvm_reg_id;
        reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
1193 sizeof(uint64_t), uint64_cmp);
1194 if (!reg_search) {
1195 continue;
1196 }
1197
1198 csr_cfg->supported = true;
1199 }
1200 }
1201
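/*
 * Probe the scratch vCPU for supported extensions, CSRs and vector state.
 * KVM_GET_REG_LIST is preferred; older kernels that lack it fall back to
 * probing each register individually via the *_legacy() helpers.
 */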
static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
1203 {
1204 g_autofree struct kvm_reg_list *reglist = NULL;
1205 KVMCPUConfig *multi_ext_cfg;
1206 struct kvm_one_reg reg;
1207 struct kvm_reg_list rl_struct;
1208 uint64_t val, reg_id, *reg_search;
1209 int i, ret;
1210
1211 rl_struct.n = 0;
1212 ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, &rl_struct);
1213
1214 /*
1215 * If KVM_GET_REG_LIST isn't supported we'll get errno 22
1216 * (EINVAL). Use read_legacy() in this case.
1217 */
1218 if (errno == EINVAL) {
1219 kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
1220 kvm_riscv_read_csr_cfg_legacy(kvmcpu);
1221 return;
1222 } else if (errno != E2BIG) {
        /*
         * E2BIG is expected here since we don't know the number of
         * registers in advance: the kernel writes the correct count
         * into rl_struct.n.
         *
         * Error out on any other errno.
         */
1230 error_report("Error when accessing get-reg-list: %s",
1231 strerror(errno));
1232 exit(EXIT_FAILURE);
1233 }
1234
1235 reglist = g_malloc(sizeof(struct kvm_reg_list) +
1236 rl_struct.n * sizeof(uint64_t));
1237 reglist->n = rl_struct.n;
1238 ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, reglist);
1239 if (ret) {
1240 error_report("Error when reading KVM_GET_REG_LIST: %s",
1241 strerror(errno));
1242 exit(EXIT_FAILURE);
1243 }
1244
1245 /* sort reglist to use bsearch() */
    qsort(&reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp);
1247
1248 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
1249 multi_ext_cfg = &kvm_multi_ext_cfgs[i];
1250 reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
1251 multi_ext_cfg->kvm_reg_id);
        reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
1253 sizeof(uint64_t), uint64_cmp);
1254 if (!reg_search) {
1255 continue;
1256 }
1257
1258 reg.id = reg_id;
1259 reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1261 if (ret != 0) {
1262 error_report("Unable to read ISA_EXT KVM register %s: %s",
1263 multi_ext_cfg->name, strerror(errno));
1264 exit(EXIT_FAILURE);
1265 }
1266
1267 multi_ext_cfg->supported = true;
1268 kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
1269 }
1270
1271 if (cpu->cfg.ext_zicbom) {
1272 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
1273 }
1274
1275 if (cpu->cfg.ext_zicboz) {
1276 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
1277 }
1278
1279 if (riscv_has_ext(&cpu->env, RVV)) {
1280 kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
1281 }
1282
1283 kvm_riscv_check_sbi_dbcn_support(cpu, reglist);
1284 kvm_riscv_read_csr_cfg(reglist);
1285 }
1286
static void riscv_init_kvm_registers(Object *cpu_obj)
1288 {
1289 RISCVCPU *cpu = RISCV_CPU(cpu_obj);
1290 KVMScratchCPU kvmcpu;
1291
1292 if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
1293 return;
1294 }
1295
1296 kvm_riscv_init_machine_ids(cpu, &kvmcpu);
1297 kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
1298 kvm_riscv_init_cfg(cpu, &kvmcpu);
1299
1300 kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
1301 }
1302
1303 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
1304 KVM_CAP_LAST_INFO
1305 };
1306
int kvm_arch_get_registers(CPUState *cs, Error **errp)
1308 {
1309 int ret = 0;
1310
1311 ret = kvm_riscv_get_regs_core(cs);
1312 if (ret) {
1313 return ret;
1314 }
1315
1316 ret = kvm_riscv_get_regs_csr(cs);
1317 if (ret) {
1318 return ret;
1319 }
1320
1321 ret = kvm_riscv_get_regs_fp(cs);
1322 if (ret) {
1323 return ret;
1324 }
1325
1326 ret = kvm_riscv_get_regs_vector(cs);
1327 if (ret) {
1328 return ret;
1329 }
1330
1331 return ret;
1332 }
1333
int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state)
1335 {
1336 if (cap_has_mp_state) {
1337 struct kvm_mp_state mp_state = {
1338 .mp_state = state
1339 };
1340
1341 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
1342 if (ret) {
1343 fprintf(stderr, "%s: failed to sync MP_STATE %d/%s\n",
1344 __func__, ret, strerror(-ret));
1345 return -1;
1346 }
1347 }
1348
1349 return 0;
1350 }
1351
int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
1353 {
1354 int ret = 0;
1355
1356 ret = kvm_riscv_put_regs_core(cs);
1357 if (ret) {
1358 return ret;
1359 }
1360
1361 ret = kvm_riscv_put_regs_csr(cs);
1362 if (ret) {
1363 return ret;
1364 }
1365
1366 ret = kvm_riscv_put_regs_fp(cs);
1367 if (ret) {
1368 return ret;
1369 }
1370
1371 ret = kvm_riscv_put_regs_vector(cs);
1372 if (ret) {
1373 return ret;
1374 }
1375
1376 if (KVM_PUT_RESET_STATE == level) {
1377 RISCVCPU *cpu = RISCV_CPU(cs);
1378 if (cs->cpu_index == 0) {
1379 ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_RUNNABLE);
1380 } else {
1381 ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_STOPPED);
1382 }
1383 if (ret) {
1384 return ret;
1385 }
1386 }
1387
1388 return ret;
1389 }
1390
int kvm_arch_release_virq_post(int virq)
1392 {
1393 return 0;
1394 }
1395
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
1398 {
1399 return 0;
1400 }
1401
int kvm_arch_destroy_vcpu(CPUState *cs)
1403 {
1404 return 0;
1405 }
1406
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
1408 {
1409 return cpu->cpu_index;
1410 }
1411
static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
1414 {
1415 CPUState *cs = opaque;
1416
1417 if (running) {
1418 kvm_riscv_put_regs_timer(cs);
1419 } else {
1420 kvm_riscv_get_regs_timer(cs);
1421 }
1422 }
1423
void kvm_arch_init_irq_routing(KVMState *s)
1425 {
1426 }
1427
static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
1429 {
1430 target_ulong reg;
1431 uint64_t id;
1432 int ret;
1433
1434 id = RISCV_CONFIG_REG(mvendorid);
    /*
     * cfg.mvendorid is a uint32 but a target_ulong will be written.
     * Assign it to a target_ulong variable to avoid clobbering
     * adjacent cpu->cfg fields when the register is written.
     */
1440 reg = cpu->cfg.mvendorid;
    ret = kvm_set_one_reg(cs, id, &reg);
1442 if (ret != 0) {
1443 return ret;
1444 }
1445
1446 id = RISCV_CONFIG_REG(marchid);
1447 ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
1448 if (ret != 0) {
1449 return ret;
1450 }
1451
1452 id = RISCV_CONFIG_REG(mimpid);
1453 ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
1454
1455 return ret;
1456 }
1457
static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
1459 {
1460 target_ulong reg = 1;
1461
1462 if (!kvm_sbi_dbcn.supported) {
1463 return 0;
1464 }
1465
    return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
1467 }
1468
int kvm_arch_init_vcpu(CPUState *cs)
1470 {
1471 int ret = 0;
1472 RISCVCPU *cpu = RISCV_CPU(cs);
1473
1474 qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);
1475
1476 if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
1477 ret = kvm_vcpu_set_machine_ids(cpu, cs);
1478 if (ret != 0) {
1479 return ret;
1480 }
1481 }
1482
1483 kvm_riscv_update_cpu_misa_ext(cpu, cs);
1484 kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);
1485
1486 ret = kvm_vcpu_enable_sbi_dbcn(cpu, cs);
1487
1488 return ret;
1489 }
1490
int kvm_arch_msi_data_to_gsi(uint32_t data)
1492 {
1493 abort();
1494 }
1495
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
1498 {
1499 return 0;
1500 }
1501
int kvm_arch_get_default_type(MachineState *ms)
1503 {
1504 return 0;
1505 }
1506
int kvm_arch_init(MachineState *ms, KVMState *s)
1508 {
1509 cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
1510 return 0;
1511 }
1512
int kvm_arch_irqchip_create(KVMState *s)
1514 {
1515 /*
1516 * We can create the VAIA using the newer device control API.
1517 */
1518 return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
1519 }
1520
int kvm_arch_process_async_events(CPUState *cs)
1522 {
1523 return 0;
1524 }
1525
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1527 {
1528 }
1529
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1531 {
1532 return MEMTXATTRS_UNSPECIFIED;
1533 }
1534
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
1536 {
1537 return true;
1538 }
1539
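/* Service SBI debug console (DBCN) calls using the first serial chardev. */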
static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
1541 {
1542 g_autofree uint8_t *buf = NULL;
1543 RISCVCPU *cpu = RISCV_CPU(cs);
1544 target_ulong num_bytes;
1545 uint64_t addr;
1546 unsigned char ch;
1547 int ret;
1548
1549 switch (run->riscv_sbi.function_id) {
1550 case SBI_EXT_DBCN_CONSOLE_READ:
1551 case SBI_EXT_DBCN_CONSOLE_WRITE:
1552 num_bytes = run->riscv_sbi.args[0];
1553
1554 if (num_bytes == 0) {
1555 run->riscv_sbi.ret[0] = SBI_SUCCESS;
1556 run->riscv_sbi.ret[1] = 0;
1557 break;
1558 }
1559
1560 addr = run->riscv_sbi.args[1];
1561
1562 /*
1563 * Handle the case where a 32 bit CPU is running in a
1564 * 64 bit addressing env.
1565 */
1566 if (riscv_cpu_mxl(&cpu->env) == MXL_RV32) {
1567 addr |= (uint64_t)run->riscv_sbi.args[2] << 32;
1568 }
1569
1570 buf = g_malloc0(num_bytes);
1571
1572 if (run->riscv_sbi.function_id == SBI_EXT_DBCN_CONSOLE_READ) {
1573 ret = qemu_chr_fe_read_all(serial_hd(0)->be, buf, num_bytes);
1574 if (ret < 0) {
1575 error_report("SBI_EXT_DBCN_CONSOLE_READ: error when "
1576 "reading chardev");
1577 exit(1);
1578 }
1579
1580 cpu_physical_memory_write(addr, buf, ret);
1581 } else {
1582 cpu_physical_memory_read(addr, buf, num_bytes);
1583
1584 ret = qemu_chr_fe_write_all(serial_hd(0)->be, buf, num_bytes);
1585 if (ret < 0) {
1586 error_report("SBI_EXT_DBCN_CONSOLE_WRITE: error when "
1587 "writing chardev");
1588 exit(1);
1589 }
1590 }
1591
1592 run->riscv_sbi.ret[0] = SBI_SUCCESS;
1593 run->riscv_sbi.ret[1] = ret;
1594 break;
1595 case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
1596 ch = run->riscv_sbi.args[0];
1597 ret = qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
1598
1599 if (ret < 0) {
1600 error_report("SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: error when "
1601 "writing chardev");
1602 exit(1);
1603 }
1604
1605 run->riscv_sbi.ret[0] = SBI_SUCCESS;
1606 run->riscv_sbi.ret[1] = 0;
1607 break;
1608 default:
1609 run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
1610 }
1611 }
1612
static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
1614 {
1615 int ret = 0;
1616 unsigned char ch;
1617 switch (run->riscv_sbi.extension_id) {
1618 case SBI_EXT_0_1_CONSOLE_PUTCHAR:
1619 ch = run->riscv_sbi.args[0];
1620 qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
1621 break;
1622 case SBI_EXT_0_1_CONSOLE_GETCHAR:
1623 ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
1624 if (ret == sizeof(ch)) {
1625 run->riscv_sbi.ret[0] = ch;
1626 } else {
1627 run->riscv_sbi.ret[0] = -1;
1628 }
1629 ret = 0;
1630 break;
1631 case SBI_EXT_DBCN:
1632 kvm_riscv_handle_sbi_dbcn(cs, run);
1633 break;
1634 default:
1635 qemu_log_mask(LOG_UNIMP,
1636 "%s: un-handled SBI EXIT, specific reasons is %lu\n",
1637 __func__, run->riscv_sbi.extension_id);
1638 ret = -1;
1639 break;
1640 }
1641 return ret;
1642 }
1643
static int kvm_riscv_handle_csr(CPUState *cs, struct kvm_run *run)
1645 {
1646 target_ulong csr_num = run->riscv_csr.csr_num;
1647 target_ulong new_value = run->riscv_csr.new_value;
1648 target_ulong write_mask = run->riscv_csr.write_mask;
1649 int ret = 0;
1650
1651 switch (csr_num) {
1652 case CSR_SEED:
1653 run->riscv_csr.ret_value = riscv_new_csr_seed(new_value, write_mask);
1654 break;
1655 default:
1656 qemu_log_mask(LOG_UNIMP,
1657 "%s: un-handled CSR EXIT for CSR %lx\n",
1658 __func__, csr_num);
1659 ret = -1;
1660 break;
1661 }
1662
1663 return ret;
1664 }
1665
static bool kvm_riscv_handle_debug(CPUState *cs)
1667 {
1668 RISCVCPU *cpu = RISCV_CPU(cs);
1669 CPURISCVState *env = &cpu->env;
1670
1671 /* Ensure PC is synchronised */
1672 kvm_cpu_synchronize_state(cs);
1673
1674 if (kvm_find_sw_breakpoint(cs, env->pc)) {
1675 return true;
1676 }
1677
1678 return false;
1679 }
1680
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1682 {
1683 int ret = 0;
1684 switch (run->exit_reason) {
1685 case KVM_EXIT_RISCV_SBI:
1686 ret = kvm_riscv_handle_sbi(cs, run);
1687 break;
1688 case KVM_EXIT_RISCV_CSR:
1689 ret = kvm_riscv_handle_csr(cs, run);
1690 break;
1691 case KVM_EXIT_DEBUG:
1692 if (kvm_riscv_handle_debug(cs)) {
1693 ret = EXCP_DEBUG;
1694 }
1695 break;
1696 default:
1697 qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
1698 __func__, run->exit_reason);
1699 ret = -1;
1700 break;
1701 }
1702 return ret;
1703 }
1704
void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
1706 {
1707 CPURISCVState *env = &cpu->env;
1708 int i;
1709
1710 for (i = 0; i < 32; i++) {
1711 env->gpr[i] = 0;
1712 }
1713 env->pc = cpu->env.kernel_addr;
1714 env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
1715 env->gpr[11] = cpu->env.fdt_addr; /* a1 */
1716
1717 kvm_riscv_reset_regs_csr(env);
1718 }
1719
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
1721 {
1722 int ret;
1723 unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;
1724
1725 if (irq != IRQ_S_EXT) {
1726 perror("kvm riscv set irq != IRQ_S_EXT\n");
1727 abort();
1728 }
1729
1730 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1731 if (ret < 0) {
1732 perror("Set irq failed");
1733 abort();
1734 }
1735 }
1736
1737 static int aia_mode;
1738
static const char *kvm_aia_mode_str(uint64_t mode)
1740 {
1741 switch (mode) {
1742 case KVM_DEV_RISCV_AIA_MODE_EMUL:
1743 return "emul";
1744 case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
1745 return "hwaccel";
1746 case KVM_DEV_RISCV_AIA_MODE_AUTO:
1747 default:
1748 return "auto";
1749 };
1750 }
1751
static char *riscv_get_kvm_aia(Object *obj, Error **errp)
1753 {
1754 return g_strdup(kvm_aia_mode_str(aia_mode));
1755 }
1756
static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
1758 {
1759 if (!strcmp(val, "emul")) {
1760 aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
1761 } else if (!strcmp(val, "hwaccel")) {
1762 aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
1763 } else if (!strcmp(val, "auto")) {
1764 aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
1765 } else {
1766 error_setg(errp, "Invalid KVM AIA mode");
1767 error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
1768 }
1769 }
1770
void kvm_arch_accel_class_init(ObjectClass *oc)
1772 {
1773 object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
1774 riscv_set_kvm_aia);
1775 object_class_property_set_description(oc, "riscv-aia",
1776 "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. "
1777 "Changing KVM AIA modes relies on host support. Defaults to 'auto' "
1778 "if the host supports it");
1779 object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
1780 "auto");
1781 }
1782
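/*
 * Create and configure the in-kernel AIA device (APLIC plus per-hart
 * IMSICs) through the KVM device control API, then set up GSI routing
 * when the host supports it.
 */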
void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
                          uint64_t aia_irq_num, uint64_t aia_msi_num,
                          uint64_t aplic_base, uint64_t imsic_base,
                          uint64_t guest_num)
1787 {
1788 int ret, i;
1789 int aia_fd = -1;
1790 uint64_t default_aia_mode;
1791 uint64_t socket_count = riscv_socket_count(machine);
1792 uint64_t max_hart_per_socket = 0;
1793 uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
1794 uint64_t socket_bits, hart_bits, guest_bits;
1795 uint64_t max_group_id;
1796
1797 aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
1798
1799 if (aia_fd < 0) {
1800 error_report("Unable to create in-kernel irqchip");
1801 exit(1);
1802 }
1803
1804 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1805 KVM_DEV_RISCV_AIA_CONFIG_MODE,
1806 &default_aia_mode, false, NULL);
1807 if (ret < 0) {
1808 error_report("KVM AIA: failed to get current KVM AIA mode");
1809 exit(1);
1810 }
1811
1812 if (default_aia_mode != aia_mode) {
1813 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1814 KVM_DEV_RISCV_AIA_CONFIG_MODE,
1815 &aia_mode, true, NULL);
1816 if (ret < 0) {
1817 warn_report("KVM AIA: failed to set KVM AIA mode '%s', using "
1818 "default host mode '%s'",
1819 kvm_aia_mode_str(aia_mode),
1820 kvm_aia_mode_str(default_aia_mode));
1821
1822 /* failed to change AIA mode, use default */
1823 aia_mode = default_aia_mode;
1824 }
1825 }
1826
1827 /*
1828 * Skip APLIC creation in KVM if we're running split mode.
1829 * This is done by leaving KVM_DEV_RISCV_AIA_CONFIG_SRCS
1830 * unset. We can also skip KVM_DEV_RISCV_AIA_ADDR_APLIC
1831 * since KVM won't be using it.
1832 */
1833 if (!kvm_kernel_irqchip_split()) {
1834 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1835 KVM_DEV_RISCV_AIA_CONFIG_SRCS,
1836 &aia_irq_num, true, NULL);
1837 if (ret < 0) {
1838 error_report("KVM AIA: failed to set number of input irq lines");
1839 exit(1);
1840 }
1841
1842 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
1843 KVM_DEV_RISCV_AIA_ADDR_APLIC,
1844 &aplic_base, true, NULL);
1845 if (ret < 0) {
1846 error_report("KVM AIA: failed to set the base address of APLIC");
1847 exit(1);
1848 }
1849 }
1850
1851 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1852 KVM_DEV_RISCV_AIA_CONFIG_IDS,
1853 &aia_msi_num, true, NULL);
1854 if (ret < 0) {
1855 error_report("KVM AIA: failed to set number of msi");
1856 exit(1);
1857 }
1858
1859
1860 if (socket_count > 1) {
1861 max_group_id = socket_count - 1;
1862 socket_bits = find_last_bit(&max_group_id, BITS_PER_LONG) + 1;
1863 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1864 KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
1865 &socket_bits, true, NULL);
1866 if (ret < 0) {
1867 error_report("KVM AIA: failed to set group_bits");
1868 exit(1);
1869 }
1870
1871 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1872 KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
1873 &group_shift, true, NULL);
1874 if (ret < 0) {
1875 error_report("KVM AIA: failed to set group_shift");
1876 exit(1);
1877 }
1878 }
1879
1880 guest_bits = guest_num == 0 ? 0 :
1881 find_last_bit(&guest_num, BITS_PER_LONG) + 1;
1882 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1883 KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS,
1884 &guest_bits, true, NULL);
1885 if (ret < 0) {
1886 error_report("KVM AIA: failed to set guest_bits");
1887 exit(1);
1888 }
1889
1890 for (socket = 0; socket < socket_count; socket++) {
1891 socket_imsic_base = imsic_base + socket * (1U << group_shift);
1892 hart_count = riscv_socket_hart_count(machine, socket);
1893 base_hart = riscv_socket_first_hartid(machine, socket);
1894
1895 if (max_hart_per_socket < hart_count) {
1896 max_hart_per_socket = hart_count;
1897 }
1898
1899 for (i = 0; i < hart_count; i++) {
1900 imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits);
1901 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
1902 KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart),
1903 &imsic_addr, true, NULL);
1904 if (ret < 0) {
1905 error_report("KVM AIA: failed to set the IMSIC address for hart %d", i);
1906 exit(1);
1907 }
1908 }
1909 }
1910
1911
1912 if (max_hart_per_socket > 1) {
1913 max_hart_per_socket--;
1914 hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
1915 } else {
1916 hart_bits = 0;
1917 }
1918
1919 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1920 KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
1921 &hart_bits, true, NULL);
1922 if (ret < 0) {
1923 error_report("KVM AIA: failed to set hart_bits");
1924 exit(1);
1925 }
1926
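/*
 * Install a 1:1 GSI-to-APLIC-input routing table so that MSI/irqfd
 * routing can be used with the in-kernel AIA.
 */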
1927 if (kvm_has_gsi_routing()) {
1928 for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) {
1929 /* KVM AIA only has one APLIC instance */
1930 kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx);
1931 }
1932 kvm_gsi_routing_allowed = true;
1933 kvm_irqchip_commit_routes(kvm_state);
1934 }
1935
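/* All attributes are configured: ask KVM to initialize the in-kernel AIA. */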
1936 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
1937 KVM_DEV_RISCV_AIA_CTRL_INIT,
1938 NULL, true, NULL);
1939 if (ret < 0) {
1940 error_report("KVM AIA: initialization failed");
1941 exit(1);
1942 }
1943
1944 kvm_msi_via_irqfd_allowed = true;
1945 }
1946
1947 static void kvm_cpu_instance_init(CPUState *cs)
1948 {
1949 Object *obj = OBJECT(RISCV_CPU(cs));
1950
1951 riscv_init_kvm_registers(obj);
1952
1953 kvm_riscv_add_cpu_user_properties(obj);
1954 }
1955
1956 /*
1957 * We'll get here via the following path:
1958 *
1959 * riscv_cpu_realize()
1960 * -> cpu_exec_realizefn()
1961 * -> kvm_cpu_realize() (via accel_cpu_common_realize())
1962 */
1963 static bool kvm_cpu_realize(CPUState *cs, Error **errp)
1964 {
1965 RISCVCPU *cpu = RISCV_CPU(cs);
1966 int ret;
1967
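/*
 * Exposing the V extension to the guest requires the vector state to be
 * enabled for the QEMU process; request it from the kernel via prctl().
 */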
1968 if (riscv_has_ext(&cpu->env, RVV)) {
1969 ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
1970 if (ret) {
1971 error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s",
1972 strerrorname_np(errno));
1973 return false;
1974 }
1975 }
1976
1977 return true;
1978 }
1979
1980 void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1981 {
1982 CPURISCVState *env = &cpu->env;
1983 KVMScratchCPU kvmcpu;
1984 struct kvm_one_reg reg;
1985 uint64_t val;
1986 int ret;
1987
1988 /* Short-circuit without spinning up a scratch vCPU */
1989 if (!cpu->cfg.ext_zicbom && !cpu->cfg.ext_zicboz &&
1990 !riscv_has_ext(env, RVV)) {
1991 return;
1992 }
1993
1994 if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
1995 error_setg(errp, "Unable to create scratch KVM cpu");
1996 return;
1997 }
1998
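/*
 * If the user set cbom_blocksize explicitly, read the host value from
 * the scratch vCPU: KVM only accepts the host's block size.
 */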
1999 if (cpu->cfg.ext_zicbom &&
2000 riscv_cpu_option_set(kvm_cbom_blocksize.name)) {
2001
2002 reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
2003 kvm_cbom_blocksize.kvm_reg_id);
2004 reg.addr = (uint64_t)&val;
2005 ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
2006 if (ret != 0) {
2007 error_setg(errp, "Unable to read cbom_blocksize, error %d", errno);
2008 return;
2009 }
2010
2011 if (cpu->cfg.cbom_blocksize != val) {
2012 error_setg(errp, "Unable to set cbom_blocksize to a different "
2013 "value than the host (%lu)", val);
2014 return;
2015 }
2016 }
2017
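/* Same host-value check for the Zicboz block size. */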
2018 if (cpu->cfg.ext_zicboz &&
2019 riscv_cpu_option_set(kvm_cboz_blocksize.name)) {
2020
2021 reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
2022 kvm_cboz_blocksize.kvm_reg_id);
2023 reg.addr = (uint64_t)&val;
2024 ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
2025 if (ret != 0) {
2026 error_setg(errp, "Unable to read cboz_blocksize, error %d", errno);
2027 return;
2028 }
2029
2030 if (cpu->cfg.cboz_blocksize != val) {
2031 error_setg(errp, "Unable to set cboz_blocksize to a different "
2032 "value than the host (%lu)", val);
2033 return;
2034 }
2035 }
2036
2037 /* Users set 'vlen', but the KVM register holds 'vlenb' (vlen / 8) */
2038 if (riscv_has_ext(env, RVV) && riscv_cpu_option_set("vlen")) {
2039 if (!kvm_v_vlenb.supported) {
2040 error_setg(errp, "Unable to set 'vlenb': register not supported");
2041 return;
2042 }
2043
2044 reg.id = kvm_v_vlenb.kvm_reg_id;
2045 reg.addr = (uint64_t)&val;
2046 ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
2047 if (ret != 0) {
2048 error_setg(errp, "Unable to read vlenb register, error %d", errno);
2049 return;
2050 }
2051
2052 if (cpu->cfg.vlenb != val) {
2053 error_setg(errp, "Unable to set 'vlen' to a different "
2054 "value than the host (%lu)", val * 8);
2055 return;
2056 }
2057 }
2058
2059 kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
2060 }
2061
2062 static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
2063 {
2064 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
2065
2066 acc->cpu_instance_init = kvm_cpu_instance_init;
2067 acc->cpu_target_realize = kvm_cpu_realize;
2068 }
2069
2070 static const TypeInfo kvm_cpu_accel_type_info = {
2071 .name = ACCEL_CPU_NAME("kvm"),
2072
2073 .parent = TYPE_ACCEL_CPU,
2074 .class_init = kvm_cpu_accel_class_init,
2075 .abstract = true,
2076 };
2077 static void kvm_cpu_accel_register_types(void)
2078 {
2079 type_register_static(&kvm_cpu_accel_type_info);
2080 }
2081 type_init(kvm_cpu_accel_register_types);
2082
2083 static void riscv_host_cpu_class_init(ObjectClass *c, void *data)
2084 {
2085 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2086
2087 #if defined(TARGET_RISCV32)
2088 mcc->misa_mxl_max = MXL_RV32;
2089 #elif defined(TARGET_RISCV64)
2090 mcc->misa_mxl_max = MXL_RV64;
2091 #endif
2092 }
2093
2094 static const TypeInfo riscv_kvm_cpu_type_infos[] = {
2095 {
2096 .name = TYPE_RISCV_CPU_HOST,
2097 .parent = TYPE_RISCV_CPU,
2098 .class_init = riscv_host_cpu_class_init,
2099 }
2100 };
2101
2102 DEFINE_TYPES(riscv_kvm_cpu_type_infos)
2103
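/* Software breakpoint encodings: EBREAK (32-bit) and C.EBREAK (16-bit). */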
2104 static const uint32_t ebreak_insn = 0x00100073;
2105 static const uint16_t c_ebreak_insn = 0x9002;
2106
2107 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
2108 {
2109 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 0)) {
2110 return -EINVAL;
2111 }
2112
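/*
 * The two lowest opcode bits are 0b11 only for non-compressed (32-bit)
 * instructions: re-read the full 4 bytes in that case and plant the
 * EBREAK encoding of the matching width.
 */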
2113 if ((bp->saved_insn & 0x3) == 0x3) {
2114 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0)
2115 || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak_insn, 4, 1)) {
2116 return -EINVAL;
2117 }
2118 } else {
2119 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak_insn, 2, 1)) {
2120 return -EINVAL;
2121 }
2122 }
2123
2124 return 0;
2125 }
2126
2127 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
2128 {
2129 uint32_t ebreak;
2130 uint16_t c_ebreak;
2131
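/*
 * Check that the breakpoint instruction we planted is still in place
 * before restoring the saved instruction.
 */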
2132 if ((bp->saved_insn & 0x3) == 0x3) {
2133 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak, 4, 0) ||
2134 ebreak != ebreak_insn ||
2135 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
2136 return -EINVAL;
2137 }
2138 } else {
2139 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak, 2, 0) ||
2140 c_ebreak != c_ebreak_insn ||
2141 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 1)) {
2142 return -EINVAL;
2143 }
2144 }
2145
2146 return 0;
2147 }
2148
2149 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
2150 {
2151 /* TODO: to be implemented later. */
2152 return -EINVAL;
2153 }
2154
2155 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
2156 {
2157 /* TODO: to be implemented later. */
2158 return -EINVAL;
2159 }
2160
2161 void kvm_arch_remove_all_hw_breakpoints(void)
2162 {
2163 /* TODO: to be implemented later. */
2164 }
2165
2166 void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
2167 {
2168 if (kvm_sw_breakpoints_active(cs)) {
2169 dbg->control |= KVM_GUESTDBG_ENABLE;
2170 }
2171 }
2172