/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch KVM
 *
 * Copyright (c) 2023 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/kvm.h>

#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "sysemu/runstate.h"
#include "cpu-csr.h"
#include "kvm_loongarch.h"
#include "trace.h"

static bool cap_has_mp_state;
static unsigned int brk_insn;
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

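/*
 * PV steal-time support: the guest physical address of the per-vCPU
 * steal-time area is exchanged through a KVM device attribute.  Both
 * accessors probe KVM_HAS_DEVICE_ATTR first and silently return 0 when
 * the host kernel lacks the PVTIME attribute group, so state sync keeps
 * working on hosts without steal-time support.
 */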
static int kvm_get_stealtime(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    int err;
    struct kvm_device_attr attr = {
        .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
        .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
        .addr = (uint64_t)&env->stealtime.guest_addr,
    };

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);
    if (err) {
        /* PV steal time is not supported by the host kernel */
        return 0;
    }

    err = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
    if (err) {
        error_report("PVTIME: KVM_GET_DEVICE_ATTR: %s", strerror(errno));
        return err;
    }

    return 0;
}

static int kvm_set_stealtime(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    int err;
    struct kvm_device_attr attr = {
        .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
        .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
        .addr = (uint64_t)&env->stealtime.guest_addr,
    };

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);
    if (err) {
        /* PV steal time is not supported by the host kernel */
        return 0;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
    if (err) {
        error_report("PVTIME: KVM_SET_DEVICE_ATTR %s with gpa "TARGET_FMT_lx,
                     strerror(errno), env->stealtime.guest_addr);
        return err;
    }

    return 0;
}

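/*
 * Core register synchronization: the 32 GPRs plus the PC, moved in a
 * single KVM_GET_REGS/KVM_SET_REGS ioctl apiece.
 */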
static int kvm_loongarch_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    struct kvm_regs regs;
    CPULoongArchState *env = cpu_env(cs);

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        trace_kvm_failed_get_regs_core(strerror(errno));
        return ret;
    }
    /* gpr[0] is hard-wired to 0 */
    env->gpr[0] = 0;
    for (i = 1; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    env->pc = regs.pc;
    return ret;
}

static int kvm_loongarch_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    struct kvm_regs regs;
    CPULoongArchState *env = cpu_env(cs);

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.pc = env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        trace_kvm_failed_put_regs_core(strerror(errno));
    }

    return ret;
}

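/*
 * CSR synchronization uses the KVM one-reg interface, one ioctl per CSR.
 * The get and put helpers cover the same fixed set: mode and exception
 * state, TLB and page-walk registers, the per-CPU save registers, timer
 * state and the direct-map windows.
 */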
static int kvm_loongarch_get_csr(CPUState *cs)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD),
                           &env->CSR_CRMD);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
                           &env->CSR_PRMD);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN),
                           &env->CSR_EUEN);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC),
                           &env->CSR_MISC);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG),
                           &env->CSR_ECFG);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT),
                           &env->CSR_ESTAT);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA),
                           &env->CSR_ERA);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV),
                           &env->CSR_BADV);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI),
                           &env->CSR_BADI);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY),
                           &env->CSR_EENTRY);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX),
                           &env->CSR_TLBIDX);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI),
                           &env->CSR_TLBEHI);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0),
                           &env->CSR_TLBELO0);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1),
                           &env->CSR_TLBELO1);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID),
                           &env->CSR_ASID);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL),
                           &env->CSR_PGDL);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH),
                           &env->CSR_PGDH);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD),
                           &env->CSR_PGD);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL),
                           &env->CSR_PWCL);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH),
                           &env->CSR_PWCH);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS),
                           &env->CSR_STLBPS);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG),
                           &env->CSR_RVACFG);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID),
                           &env->CSR_CPUID);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1),
                           &env->CSR_PRCFG1);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2),
                           &env->CSR_PRCFG2);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3),
                           &env->CSR_PRCFG3);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)),
                           &env->CSR_SAVE[0]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)),
                           &env->CSR_SAVE[1]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)),
                           &env->CSR_SAVE[2]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)),
                           &env->CSR_SAVE[3]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)),
                           &env->CSR_SAVE[4]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)),
                           &env->CSR_SAVE[5]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)),
                           &env->CSR_SAVE[6]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)),
                           &env->CSR_SAVE[7]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID),
                           &env->CSR_TID);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC),
                           &env->CSR_CNTC);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR),
                           &env->CSR_TICLR);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL),
                           &env->CSR_LLBCTL);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1),
                           &env->CSR_IMPCTL1);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2),
                           &env->CSR_IMPCTL2);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY),
                           &env->CSR_TLBRENTRY);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV),
                           &env->CSR_TLBRBADV);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA),
                           &env->CSR_TLBRERA);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE),
                           &env->CSR_TLBRSAVE);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0),
                           &env->CSR_TLBRELO0);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1),
                           &env->CSR_TLBRELO1);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI),
                           &env->CSR_TLBREHI);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD),
                           &env->CSR_TLBRPRMD);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)),
                           &env->CSR_DMW[0]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)),
                           &env->CSR_DMW[1]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)),
                           &env->CSR_DMW[2]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)),
                           &env->CSR_DMW[3]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL),
                           &env->CSR_TVAL);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG),
                           &env->CSR_TCFG);

    return ret;
}

static int kvm_loongarch_put_csr(CPUState *cs, int level)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD),
                           &env->CSR_CRMD);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
                           &env->CSR_PRMD);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN),
                           &env->CSR_EUEN);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC),
                           &env->CSR_MISC);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG),
                           &env->CSR_ECFG);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT),
                           &env->CSR_ESTAT);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA),
                           &env->CSR_ERA);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV),
                           &env->CSR_BADV);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI),
                           &env->CSR_BADI);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY),
                           &env->CSR_EENTRY);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX),
                           &env->CSR_TLBIDX);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI),
                           &env->CSR_TLBEHI);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0),
                           &env->CSR_TLBELO0);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1),
                           &env->CSR_TLBELO1);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID),
                           &env->CSR_ASID);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL),
                           &env->CSR_PGDL);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH),
                           &env->CSR_PGDH);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD),
                           &env->CSR_PGD);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL),
                           &env->CSR_PWCL);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH),
                           &env->CSR_PWCH);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS),
                           &env->CSR_STLBPS);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG),
                           &env->CSR_RVACFG);

    /* CPUID is constant after power-on; it should be set only once */
    if (level >= KVM_PUT_FULL_STATE) {
        ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID),
                               &env->CSR_CPUID);
    }

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1),
                           &env->CSR_PRCFG1);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2),
                           &env->CSR_PRCFG2);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3),
                           &env->CSR_PRCFG3);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)),
                           &env->CSR_SAVE[0]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)),
                           &env->CSR_SAVE[1]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)),
                           &env->CSR_SAVE[2]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)),
                           &env->CSR_SAVE[3]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)),
                           &env->CSR_SAVE[4]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)),
                           &env->CSR_SAVE[5]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)),
                           &env->CSR_SAVE[6]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)),
                           &env->CSR_SAVE[7]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID),
                           &env->CSR_TID);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC),
                           &env->CSR_CNTC);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR),
                           &env->CSR_TICLR);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL),
                           &env->CSR_LLBCTL);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1),
                           &env->CSR_IMPCTL1);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2),
                           &env->CSR_IMPCTL2);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY),
                           &env->CSR_TLBRENTRY);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV),
                           &env->CSR_TLBRBADV);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA),
                           &env->CSR_TLBRERA);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE),
                           &env->CSR_TLBRSAVE);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0),
                           &env->CSR_TLBRELO0);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1),
                           &env->CSR_TLBRELO1);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI),
                           &env->CSR_TLBREHI);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD),
                           &env->CSR_TLBRPRMD);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)),
                           &env->CSR_DMW[0]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)),
                           &env->CSR_DMW[1]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)),
                           &env->CSR_DMW[2]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)),
                           &env->CSR_DMW[3]);
    /*
     * The timer configuration must be written last, since it is used
     * to enable the guest timer.
     */
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL),
                           &env->CSR_TVAL);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG),
                           &env->CSR_TCFG);
    return ret;
}

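/*
 * FPU state travels through struct kvm_fpu.  The eight LoongArch
 * condition flags (fcc0-fcc7) are packed one byte each into the 64-bit
 * fcc word, so they are explicitly (un)packed on either side.
 */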
static int kvm_loongarch_get_regs_fp(CPUState *cs)
{
    int ret, i;
    struct kvm_fpu fpu;
    CPULoongArchState *env = cpu_env(cs);

    ret = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        trace_kvm_failed_get_fpu(strerror(errno));
        return ret;
    }

    env->fcsr0 = fpu.fcsr;
    for (i = 0; i < 32; i++) {
        env->fpr[i].vreg.UD[0] = fpu.fpr[i].val64[0];
        env->fpr[i].vreg.UD[1] = fpu.fpr[i].val64[1];
        env->fpr[i].vreg.UD[2] = fpu.fpr[i].val64[2];
        env->fpr[i].vreg.UD[3] = fpu.fpr[i].val64[3];
    }
    for (i = 0; i < 8; i++) {
        env->cf[i] = fpu.fcc & 0xFF;
        fpu.fcc = fpu.fcc >> 8;
    }

    return ret;
}

static int kvm_loongarch_put_regs_fp(CPUState *cs)
{
    int ret, i;
    struct kvm_fpu fpu;
    CPULoongArchState *env = cpu_env(cs);

    fpu.fcsr = env->fcsr0;
    fpu.fcc = 0;
    for (i = 0; i < 32; i++) {
        fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0];
        fpu.fpr[i].val64[1] = env->fpr[i].vreg.UD[1];
        fpu.fpr[i].val64[2] = env->fpr[i].vreg.UD[2];
        fpu.fpr[i].val64[3] = env->fpr[i].vreg.UD[3];
    }

    for (i = 0; i < 8; i++) {
        /* Widen before shifting: shifting a promoted int by >= 32 is UB */
        fpu.fcc |= (uint64_t)env->cf[i] << (8 * i);
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (ret < 0) {
        trace_kvm_failed_put_fpu(strerror(errno));
    }

    return ret;
}

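/*
 * LBT (binary translation) state: four scratch registers plus eflags and
 * ftop.  It is synchronized only when the vCPU advertises full LBT
 * support (CPUCFG2 LBT_ALL == 7, i.e. x86, ARM and MIPS translation).
 */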
static int kvm_loongarch_put_lbt(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    uint64_t val;
    int ret;

    /* Check whether the VM supports LBT first */
    if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 7) {
        return 0;
    }

    /* Set the six LBT registers: scr0-scr3, eflags and ftop */
    ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR0, &env->lbt.scr0);
    ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR1, &env->lbt.scr1);
    ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR2, &env->lbt.scr2);
    ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR3, &env->lbt.scr3);
    /*
     * Note that KVM_REG_LOONGARCH_LBT_FTOP is defined as 64-bit while
     * lbt.ftop is 32-bit; the same holds for KVM_REG_LOONGARCH_LBT_EFLAGS,
     * so both are staged through a 64-bit temporary.
     */
    val = env->lbt.eflags;
    ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_EFLAGS, &val);
    val = env->lbt.ftop;
    ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_FTOP, &val);

    return ret;
}

static int kvm_loongarch_get_lbt(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    uint64_t val;
    int ret;

    /* Check whether the VM supports LBT first */
    if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 7) {
        return 0;
    }

    /* Get the six LBT registers: scr0-scr3, eflags and ftop */
    ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR0, &env->lbt.scr0);
    ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR1, &env->lbt.scr1);
    ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR2, &env->lbt.scr2);
    ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR3, &env->lbt.scr3);
    ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_EFLAGS, &val);
    env->lbt.eflags = (uint32_t)val;
    ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_FTOP, &val);
    env->lbt.ftop = (uint32_t)val;

    return ret;
}

void kvm_arch_reset_vcpu(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    uint64_t unused = 0;

    env->mp_state = KVM_MP_STATE_RUNNABLE;
    /*
     * The write itself triggers the in-kernel vCPU reset; pass a dummy
     * value through a valid buffer rather than a null pointer.
     */
    kvm_set_one_reg(cs, KVM_REG_LOONGARCH_VCPU_RESET, &unused);
}

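/*
 * Multiprocessing state (runnable vs. halted) is synchronized only when
 * the host advertises KVM_CAP_MP_STATE; otherwise both helpers are no-ops.
 */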
static int kvm_loongarch_get_mpstate(CPUState *cs)
{
    int ret = 0;
    struct kvm_mp_state mp_state;
    CPULoongArchState *env = cpu_env(cs);

    if (cap_has_mp_state) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
        if (ret) {
            trace_kvm_failed_get_mpstate(strerror(errno));
            return ret;
        }
        env->mp_state = mp_state.mp_state;
    }

    return ret;
}

static int kvm_loongarch_put_mpstate(CPUState *cs)
{
    int ret = 0;
    struct kvm_mp_state mp_state = {
        .mp_state = cpu_env(cs)->mp_state
    };

    if (cap_has_mp_state) {
        ret = kvm_vcpu_ioctl(cs, KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            trace_kvm_failed_put_mpstate(strerror(errno));
        }
    }

    return ret;
}

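/* Read back the 21 CPUCFG words exposed through the one-reg interface. */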
static int kvm_loongarch_get_cpucfg(CPUState *cs)
{
    int i, ret = 0;
    uint64_t val;
    CPULoongArchState *env = cpu_env(cs);

    for (i = 0; i < 21; i++) {
        ret = kvm_get_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
        if (ret < 0) {
            trace_kvm_failed_get_cpucfg(strerror(errno));
        }
        env->cpucfg[i] = (uint32_t)val;
    }
    return ret;
}

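/*
 * CPUCFG2 advertises ISA features.  Mask QEMU's view with what the host
 * kernel reports through the CPUCFG device attribute, then pin the
 * FP/LLFTP version fields to their minimal value of 1 whenever the
 * corresponding feature stays enabled.  kvm_loongarch_put_cpucfg() below
 * runs this check before writing word 2.
 */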
static int kvm_check_cpucfg2(CPUState *cs)
{
    int ret;
    uint64_t val;
    struct kvm_device_attr attr = {
        .group = KVM_LOONGARCH_VCPU_CPUCFG,
        .attr = 2,
        .addr = (uint64_t)&val,
    };
    CPULoongArchState *env = cpu_env(cs);

    ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);
    if (!ret) {
        kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
        env->cpucfg[2] &= val;

        if (FIELD_EX32(env->cpucfg[2], CPUCFG2, FP)) {
            /* The minimal FP version is 1 */
            env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, FP_VER, 1);
        }

        if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LLFTP)) {
            /* The minimal LLFTP version is 1 */
            env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LLFTP_VER, 1);
        }
    }

    return ret;
}

static int kvm_loongarch_put_cpucfg(CPUState *cs)
{
    int i, ret = 0;
    CPULoongArchState *env = cpu_env(cs);
    uint64_t val;

    for (i = 0; i < 21; i++) {
        if (i == 2) {
            ret = kvm_check_cpucfg2(cs);
            if (ret) {
                return ret;
            }
        }
        val = env->cpucfg[i];
        ret = kvm_set_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
        if (ret < 0) {
            trace_kvm_failed_put_cpucfg(strerror(errno));
        }
    }
    return ret;
}

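/*
 * Top-level register synchronization entry points, invoked by the
 * generic KVM accelerator code whenever vCPU state is synchronized.
 */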
int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    int ret;

    ret = kvm_loongarch_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_cpucfg(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_lbt(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_get_stealtime(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_mpstate(cs);
    return ret;
}

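/*
 * Put-side ordering notes: the steal-time address is restored only for a
 * full state load (see the comment inside), and kvm_loongarch_put_csr()
 * deliberately writes the timer configuration last.
 */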
int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
    int ret;

    ret = kvm_loongarch_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_cpucfg(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_csr(cs, level);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_lbt(cs);
    if (ret) {
        return ret;
    }

    if (level >= KVM_PUT_FULL_STATE) {
        /*
         * Only KVM_PUT_FULL_STATE is required here: the KVM kernel
         * clears guest_addr itself for KVM_PUT_RESET_STATE.
         */
        ret = kvm_set_stealtime(cs);
        if (ret) {
            return ret;
        }
    }

    ret = kvm_loongarch_put_mpstate(cs);
    return ret;
}

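/*
 * Save the stable counter when the VM stops and restore it on resume, so
 * the guest's constant timer does not account the time spent stopped.
 */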
static void kvm_loongarch_vm_stage_change(void *opaque, bool running,
                                          RunState state)
{
    int ret;
    CPUState *cs = opaque;
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    if (running) {
        ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
                              &cpu->kvm_state_counter);
        if (ret < 0) {
            trace_kvm_failed_put_counter(strerror(errno));
        }
    } else {
        ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
                              &cpu->kvm_state_counter);
        if (ret < 0) {
            trace_kvm_failed_get_counter(strerror(errno));
        }
    }
}

static bool kvm_feature_supported(CPUState *cs, enum loongarch_features feature)
{
    int ret;
    struct kvm_device_attr attr;

    switch (feature) {
    case LOONGARCH_FEATURE_LBT:
        /*
         * Return true only if all of the LBT features are supported:
         *   KVM_LOONGARCH_VM_FEAT_X86BT
         *   KVM_LOONGARCH_VM_FEAT_ARMBT
         *   KVM_LOONGARCH_VM_FEAT_MIPSBT
         */
        attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
        attr.attr = KVM_LOONGARCH_VM_FEAT_X86BT;
        ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
        attr.attr = KVM_LOONGARCH_VM_FEAT_ARMBT;
        ret |= kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
        attr.attr = KVM_LOONGARCH_VM_FEAT_MIPSBT;
        ret |= kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
        return (ret == 0);

    case LOONGARCH_FEATURE_PMU:
        attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
        attr.attr = KVM_LOONGARCH_VM_FEAT_PMU;
        ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
        return (ret == 0);

    default:
        return false;
    }

    return false;
}

static int kvm_cpu_check_lbt(CPUState *cs, Error **errp)
{
    CPULoongArchState *env = cpu_env(cs);
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    bool kvm_supported;

    kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LBT);
    if (cpu->lbt == ON_OFF_AUTO_ON) {
        if (kvm_supported) {
            env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LBT_ALL, 7);
        } else {
            error_setg(errp, "'lbt' feature not supported by KVM on this host");
            return -ENOTSUP;
        }
    } else if ((cpu->lbt == ON_OFF_AUTO_AUTO) && kvm_supported) {
        env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LBT_ALL, 7);
    }

    return 0;
}

static int kvm_cpu_check_pmu(CPUState *cs, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = cpu_env(cs);
    bool kvm_supported;

    kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_PMU);
    if (cpu->pmu == ON_OFF_AUTO_ON) {
        if (!kvm_supported) {
            error_setg(errp, "'pmu' feature not supported by KVM on the host");
            return -ENOTSUP;
        }
    } else if (cpu->pmu != ON_OFF_AUTO_AUTO) {
        /* disable pmu if ON_OFF_AUTO_OFF is set */
        kvm_supported = false;
    }

    if (kvm_supported) {
        env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMP, 1);
        env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMNUM, 3);
        env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMBITS, 63);
        env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, UPM, 1);
    }
    return 0;
}

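/*
 * Per-vCPU init: register the counter save/restore hook, cache the
 * software-breakpoint instruction advertised through
 * KVM_REG_LOONGARCH_DEBUG_INST, and validate the user's lbt/pmu settings
 * against what the host supports.
 */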
int kvm_arch_init_vcpu(CPUState *cs)
{
    uint64_t val;
    int ret;
    Error *local_err = NULL;

    ret = 0;
    qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs);

    if (!kvm_get_one_reg(cs, KVM_REG_LOONGARCH_DEBUG_INST, &val)) {
        brk_insn = val;
    }

    ret = kvm_cpu_check_lbt(cs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
    }

    ret = kvm_cpu_check_pmu(cs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
    }

    return ret;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
}

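/*
 * Software breakpoints: insertion saves the original instruction and
 * patches in the break opcode cached from KVM_REG_LOONGARCH_DEBUG_INST;
 * removal checks that the opcode is still in place before restoring the
 * saved instruction.
 */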
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        error_report("%s failed", __func__);
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
        brk != brk_insn ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        error_report("%s failed", __func__);
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
}

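/*
 * Returns true when a KVM_EXIT_DEBUG was caused by QEMU itself
 * (single-stepping or one of our software breakpoints); otherwise the
 * exception belongs to the guest and the vCPU is simply resumed.
 */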
static bool kvm_loongarch_handle_debug(CPUState *cs, struct kvm_run *run)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    if (cs->singlestep_enabled) {
        return true;
    }

    if (kvm_find_sw_breakpoint(cs, env->pc)) {
        return true;
    }

    return false;
}

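/*
 * Exit dispatch: IOCSR accesses are replayed into the per-CPU IOCSR
 * address space with the vCPU index as requester id; debug exits are
 * filtered through kvm_loongarch_handle_debug() above.
 */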
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);
    MemTxAttrs attrs = {};

    attrs.requester_id = env_cpu(env)->cpu_index;

    trace_kvm_arch_handle_exit(run->exit_reason);
    switch (run->exit_reason) {
    case KVM_EXIT_LOONGARCH_IOCSR:
        address_space_rw(env->address_space_iocsr,
                         run->iocsr_io.phys_addr,
                         attrs,
                         run->iocsr_io.data,
                         run->iocsr_io.len,
                         run->iocsr_io.is_write);
        break;

    case KVM_EXIT_DEBUG:
        if (kvm_loongarch_handle_debug(cs, run)) {
            ret = EXCP_DEBUG;
        }
        break;

    default:
        ret = -1;
        warn_report("KVM: unknown exit reason %d", run->exit_reason);
        break;
    }
    return ret;
}

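/*
 * Interrupt injection uses KVM_INTERRUPT with a signed irq number:
 * positive asserts the line, negative deasserts it.
 */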
int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level)
{
    struct kvm_interrupt intr;
    CPUState *cs = CPU(cpu);

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    trace_kvm_set_intr(irq, level);
    return kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}