1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * QEMU LoongArch KVM
4 *
5 * Copyright (c) 2023 Loongson Technology Corporation Limited
6 */
7
8 #include "qemu/osdep.h"
9 #include <sys/ioctl.h>
10 #include <linux/kvm.h>
11
12 #include "qemu/timer.h"
13 #include "qemu/error-report.h"
14 #include "qemu/main-loop.h"
15 #include "sysemu/sysemu.h"
16 #include "sysemu/kvm.h"
17 #include "sysemu/kvm_int.h"
18 #include "hw/pci/pci.h"
19 #include "exec/memattrs.h"
20 #include "exec/address-spaces.h"
21 #include "hw/boards.h"
22 #include "hw/irq.h"
23 #include "qemu/log.h"
24 #include "hw/loader.h"
25 #include "sysemu/runstate.h"
26 #include "cpu-csr.h"
27 #include "kvm_loongarch.h"
28 #include "trace.h"
29
/* True when the host KVM exposes KVM_CAP_MP_STATE (probed in kvm_arch_init). */
static bool cap_has_mp_state;
/* Breakpoint instruction encoding read from KVM_REG_LOONGARCH_DEBUG_INST. */
static unsigned int brk_insn;
/* No capabilities beyond the generic KVM baseline are required. */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
35
/*
 * Copy the general-purpose registers and PC from KVM into env.
 * Returns 0 on success or the negative kvm_vcpu_ioctl() error.
 */
static int kvm_loongarch_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    struct kvm_regs regs;
    CPULoongArchState *env = cpu_env(cs);

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        trace_kvm_failed_get_regs_core(strerror(errno));
        return ret;
    }
    /* gpr[0] value is always 0 */
    env->gpr[0] = 0;
    for (i = 1; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    env->pc = regs.pc;
    return ret;
}
58
/*
 * Copy the general-purpose registers and PC from env into KVM.
 * Returns 0 on success or the negative kvm_vcpu_ioctl() error.
 */
static int kvm_loongarch_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    struct kvm_regs regs;
    CPULoongArchState *env = cpu_env(cs);

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.pc = env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        trace_kvm_failed_put_regs_core(strerror(errno));
    }

    return ret;
}
79
/*
 * Read every CSR that QEMU mirrors from KVM into the corresponding
 * env field.  Individual failures are OR-ed into the return value.
 */
static int kvm_loongarch_get_csr(CPUState *cs)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);

/* Fetch one CSR from KVM into the matching QEMU env field. */
#define GET_ONE_CSR(csr, field)                                         \
    do {                                                                \
        ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(csr), &env->field);    \
    } while (0)

    GET_ONE_CSR(LOONGARCH_CSR_CRMD, CSR_CRMD);
    GET_ONE_CSR(LOONGARCH_CSR_PRMD, CSR_PRMD);
    GET_ONE_CSR(LOONGARCH_CSR_EUEN, CSR_EUEN);
    GET_ONE_CSR(LOONGARCH_CSR_MISC, CSR_MISC);
    GET_ONE_CSR(LOONGARCH_CSR_ECFG, CSR_ECFG);
    GET_ONE_CSR(LOONGARCH_CSR_ESTAT, CSR_ESTAT);
    GET_ONE_CSR(LOONGARCH_CSR_ERA, CSR_ERA);
    GET_ONE_CSR(LOONGARCH_CSR_BADV, CSR_BADV);
    GET_ONE_CSR(LOONGARCH_CSR_BADI, CSR_BADI);
    GET_ONE_CSR(LOONGARCH_CSR_EENTRY, CSR_EENTRY);
    GET_ONE_CSR(LOONGARCH_CSR_TLBIDX, CSR_TLBIDX);
    GET_ONE_CSR(LOONGARCH_CSR_TLBEHI, CSR_TLBEHI);
    GET_ONE_CSR(LOONGARCH_CSR_TLBELO0, CSR_TLBELO0);
    GET_ONE_CSR(LOONGARCH_CSR_TLBELO1, CSR_TLBELO1);
    GET_ONE_CSR(LOONGARCH_CSR_ASID, CSR_ASID);
    GET_ONE_CSR(LOONGARCH_CSR_PGDL, CSR_PGDL);
    GET_ONE_CSR(LOONGARCH_CSR_PGDH, CSR_PGDH);
    GET_ONE_CSR(LOONGARCH_CSR_PGD, CSR_PGD);
    GET_ONE_CSR(LOONGARCH_CSR_PWCL, CSR_PWCL);
    GET_ONE_CSR(LOONGARCH_CSR_PWCH, CSR_PWCH);
    GET_ONE_CSR(LOONGARCH_CSR_STLBPS, CSR_STLBPS);
    GET_ONE_CSR(LOONGARCH_CSR_RVACFG, CSR_RVACFG);
    GET_ONE_CSR(LOONGARCH_CSR_CPUID, CSR_CPUID);
    GET_ONE_CSR(LOONGARCH_CSR_PRCFG1, CSR_PRCFG1);
    GET_ONE_CSR(LOONGARCH_CSR_PRCFG2, CSR_PRCFG2);
    GET_ONE_CSR(LOONGARCH_CSR_PRCFG3, CSR_PRCFG3);
    GET_ONE_CSR(LOONGARCH_CSR_SAVE(0), CSR_SAVE[0]);
    GET_ONE_CSR(LOONGARCH_CSR_SAVE(1), CSR_SAVE[1]);
    GET_ONE_CSR(LOONGARCH_CSR_SAVE(2), CSR_SAVE[2]);
    GET_ONE_CSR(LOONGARCH_CSR_SAVE(3), CSR_SAVE[3]);
    GET_ONE_CSR(LOONGARCH_CSR_SAVE(4), CSR_SAVE[4]);
    GET_ONE_CSR(LOONGARCH_CSR_SAVE(5), CSR_SAVE[5]);
    GET_ONE_CSR(LOONGARCH_CSR_SAVE(6), CSR_SAVE[6]);
    GET_ONE_CSR(LOONGARCH_CSR_SAVE(7), CSR_SAVE[7]);
    GET_ONE_CSR(LOONGARCH_CSR_TID, CSR_TID);
    GET_ONE_CSR(LOONGARCH_CSR_CNTC, CSR_CNTC);
    GET_ONE_CSR(LOONGARCH_CSR_TICLR, CSR_TICLR);
    GET_ONE_CSR(LOONGARCH_CSR_LLBCTL, CSR_LLBCTL);
    GET_ONE_CSR(LOONGARCH_CSR_IMPCTL1, CSR_IMPCTL1);
    GET_ONE_CSR(LOONGARCH_CSR_IMPCTL2, CSR_IMPCTL2);
    GET_ONE_CSR(LOONGARCH_CSR_TLBRENTRY, CSR_TLBRENTRY);
    GET_ONE_CSR(LOONGARCH_CSR_TLBRBADV, CSR_TLBRBADV);
    GET_ONE_CSR(LOONGARCH_CSR_TLBRERA, CSR_TLBRERA);
    GET_ONE_CSR(LOONGARCH_CSR_TLBRSAVE, CSR_TLBRSAVE);
    GET_ONE_CSR(LOONGARCH_CSR_TLBRELO0, CSR_TLBRELO0);
    GET_ONE_CSR(LOONGARCH_CSR_TLBRELO1, CSR_TLBRELO1);
    GET_ONE_CSR(LOONGARCH_CSR_TLBREHI, CSR_TLBREHI);
    GET_ONE_CSR(LOONGARCH_CSR_TLBRPRMD, CSR_TLBRPRMD);
    GET_ONE_CSR(LOONGARCH_CSR_DMW(0), CSR_DMW[0]);
    GET_ONE_CSR(LOONGARCH_CSR_DMW(1), CSR_DMW[1]);
    GET_ONE_CSR(LOONGARCH_CSR_DMW(2), CSR_DMW[2]);
    GET_ONE_CSR(LOONGARCH_CSR_DMW(3), CSR_DMW[3]);
    GET_ONE_CSR(LOONGARCH_CSR_TVAL, CSR_TVAL);
    GET_ONE_CSR(LOONGARCH_CSR_TCFG, CSR_TCFG);

#undef GET_ONE_CSR

    return ret;
}
249
/*
 * Write every CSR that QEMU mirrors back into KVM.  CSR_CPUID is only
 * written for a full state load, and the timer CSRs go last because
 * writing TCFG may start the guest timer.
 */
static int kvm_loongarch_put_csr(CPUState *cs, int level)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);

/* Push one QEMU env field into the matching KVM CSR. */
#define SET_ONE_CSR(csr, field)                                         \
    do {                                                                \
        ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(csr), &env->field);    \
    } while (0)

    SET_ONE_CSR(LOONGARCH_CSR_CRMD, CSR_CRMD);
    SET_ONE_CSR(LOONGARCH_CSR_PRMD, CSR_PRMD);
    SET_ONE_CSR(LOONGARCH_CSR_EUEN, CSR_EUEN);
    SET_ONE_CSR(LOONGARCH_CSR_MISC, CSR_MISC);
    SET_ONE_CSR(LOONGARCH_CSR_ECFG, CSR_ECFG);
    SET_ONE_CSR(LOONGARCH_CSR_ESTAT, CSR_ESTAT);
    SET_ONE_CSR(LOONGARCH_CSR_ERA, CSR_ERA);
    SET_ONE_CSR(LOONGARCH_CSR_BADV, CSR_BADV);
    SET_ONE_CSR(LOONGARCH_CSR_BADI, CSR_BADI);
    SET_ONE_CSR(LOONGARCH_CSR_EENTRY, CSR_EENTRY);
    SET_ONE_CSR(LOONGARCH_CSR_TLBIDX, CSR_TLBIDX);
    SET_ONE_CSR(LOONGARCH_CSR_TLBEHI, CSR_TLBEHI);
    SET_ONE_CSR(LOONGARCH_CSR_TLBELO0, CSR_TLBELO0);
    SET_ONE_CSR(LOONGARCH_CSR_TLBELO1, CSR_TLBELO1);
    SET_ONE_CSR(LOONGARCH_CSR_ASID, CSR_ASID);
    SET_ONE_CSR(LOONGARCH_CSR_PGDL, CSR_PGDL);
    SET_ONE_CSR(LOONGARCH_CSR_PGDH, CSR_PGDH);
    SET_ONE_CSR(LOONGARCH_CSR_PGD, CSR_PGD);
    SET_ONE_CSR(LOONGARCH_CSR_PWCL, CSR_PWCL);
    SET_ONE_CSR(LOONGARCH_CSR_PWCH, CSR_PWCH);
    SET_ONE_CSR(LOONGARCH_CSR_STLBPS, CSR_STLBPS);
    SET_ONE_CSR(LOONGARCH_CSR_RVACFG, CSR_RVACFG);

    /* CPUID is constant after poweron, it should be set only once */
    if (level >= KVM_PUT_FULL_STATE) {
        SET_ONE_CSR(LOONGARCH_CSR_CPUID, CSR_CPUID);
    }

    SET_ONE_CSR(LOONGARCH_CSR_PRCFG1, CSR_PRCFG1);
    SET_ONE_CSR(LOONGARCH_CSR_PRCFG2, CSR_PRCFG2);
    SET_ONE_CSR(LOONGARCH_CSR_PRCFG3, CSR_PRCFG3);
    SET_ONE_CSR(LOONGARCH_CSR_SAVE(0), CSR_SAVE[0]);
    SET_ONE_CSR(LOONGARCH_CSR_SAVE(1), CSR_SAVE[1]);
    SET_ONE_CSR(LOONGARCH_CSR_SAVE(2), CSR_SAVE[2]);
    SET_ONE_CSR(LOONGARCH_CSR_SAVE(3), CSR_SAVE[3]);
    SET_ONE_CSR(LOONGARCH_CSR_SAVE(4), CSR_SAVE[4]);
    SET_ONE_CSR(LOONGARCH_CSR_SAVE(5), CSR_SAVE[5]);
    SET_ONE_CSR(LOONGARCH_CSR_SAVE(6), CSR_SAVE[6]);
    SET_ONE_CSR(LOONGARCH_CSR_SAVE(7), CSR_SAVE[7]);
    SET_ONE_CSR(LOONGARCH_CSR_TID, CSR_TID);
    SET_ONE_CSR(LOONGARCH_CSR_CNTC, CSR_CNTC);
    SET_ONE_CSR(LOONGARCH_CSR_TICLR, CSR_TICLR);
    SET_ONE_CSR(LOONGARCH_CSR_LLBCTL, CSR_LLBCTL);
    SET_ONE_CSR(LOONGARCH_CSR_IMPCTL1, CSR_IMPCTL1);
    SET_ONE_CSR(LOONGARCH_CSR_IMPCTL2, CSR_IMPCTL2);
    SET_ONE_CSR(LOONGARCH_CSR_TLBRENTRY, CSR_TLBRENTRY);
    SET_ONE_CSR(LOONGARCH_CSR_TLBRBADV, CSR_TLBRBADV);
    SET_ONE_CSR(LOONGARCH_CSR_TLBRERA, CSR_TLBRERA);
    SET_ONE_CSR(LOONGARCH_CSR_TLBRSAVE, CSR_TLBRSAVE);
    SET_ONE_CSR(LOONGARCH_CSR_TLBRELO0, CSR_TLBRELO0);
    SET_ONE_CSR(LOONGARCH_CSR_TLBRELO1, CSR_TLBRELO1);
    SET_ONE_CSR(LOONGARCH_CSR_TLBREHI, CSR_TLBREHI);
    SET_ONE_CSR(LOONGARCH_CSR_TLBRPRMD, CSR_TLBRPRMD);
    SET_ONE_CSR(LOONGARCH_CSR_DMW(0), CSR_DMW[0]);
    SET_ONE_CSR(LOONGARCH_CSR_DMW(1), CSR_DMW[1]);
    SET_ONE_CSR(LOONGARCH_CSR_DMW(2), CSR_DMW[2]);
    SET_ONE_CSR(LOONGARCH_CSR_DMW(3), CSR_DMW[3]);
    /*
     * timer cfg must be put at last since it is used to enable
     * guest timer
     */
    SET_ONE_CSR(LOONGARCH_CSR_TVAL, CSR_TVAL);
    SET_ONE_CSR(LOONGARCH_CSR_TCFG, CSR_TCFG);

#undef SET_ONE_CSR

    return ret;
}
424
/*
 * Fetch the FP/vector state (fcsr, 256-bit fpr lanes, condition flags)
 * from KVM into env.  Returns 0 on success or the negative ioctl error.
 */
static int kvm_loongarch_get_regs_fp(CPUState *cs)
{
    int ret, i, j;
    struct kvm_fpu fpu;
    CPULoongArchState *env = cpu_env(cs);

    ret = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        trace_kvm_failed_get_fpu(strerror(errno));
        return ret;
    }

    env->fcsr0 = fpu.fcsr;
    /* Each register carries four 64-bit lanes (LASX width). */
    for (i = 0; i < 32; i++) {
        for (j = 0; j < 4; j++) {
            env->fpr[i].vreg.UD[j] = fpu.fpr[i].val64[j];
        }
    }
    /* fcc packs the eight condition flags one byte each, cf[0] lowest. */
    for (i = 0; i < 8; i++) {
        env->cf[i] = fpu.fcc & 0xFF;
        fpu.fcc = fpu.fcc >> 8;
    }

    return ret;
}
451
/*
 * Push the FP/vector state (fcsr, 256-bit fpr lanes, condition flags)
 * from env into KVM.  Returns 0 on success or the negative ioctl error.
 */
static int kvm_loongarch_put_regs_fp(CPUState *cs)
{
    int ret, i;
    struct kvm_fpu fpu;
    CPULoongArchState *env = cpu_env(cs);

    fpu.fcsr = env->fcsr0;
    fpu.fcc = 0;
    for (i = 0; i < 32; i++) {
        fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0];
        fpu.fpr[i].val64[1] = env->fpr[i].vreg.UD[1];
        fpu.fpr[i].val64[2] = env->fpr[i].vreg.UD[2];
        fpu.fpr[i].val64[3] = env->fpr[i].vreg.UD[3];
    }

    for (i = 0; i < 8; i++) {
        /*
         * Widen before shifting: cf[i] promotes to int, and shifting an
         * int by 8 * i >= 32 bits (i >= 4) is undefined behavior.
         */
        fpu.fcc |= (uint64_t)env->cf[i] << (8 * i);
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (ret < 0) {
        trace_kvm_failed_put_fpu(strerror(errno));
    }

    return ret;
}
478
/* Reset per-vCPU KVM state: a freshly reset vCPU is runnable. */
void kvm_arch_reset_vcpu(CPULoongArchState *env)
{
    env->mp_state = KVM_MP_STATE_RUNNABLE;
}
483
/*
 * Read the vCPU's multiprocessing state from KVM, if the capability
 * is available; otherwise a silent no-op.
 */
static int kvm_loongarch_get_mpstate(CPUState *cs)
{
    struct kvm_mp_state mp_state;
    CPULoongArchState *env = cpu_env(cs);
    int ret;

    if (!cap_has_mp_state) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_get_mpstate(strerror(errno));
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    return ret;
}
501
/*
 * Write the vCPU's multiprocessing state into KVM, if the capability
 * is available; otherwise a silent no-op.
 */
static int kvm_loongarch_put_mpstate(CPUState *cs)
{
    struct kvm_mp_state mp_state = {
        .mp_state = cpu_env(cs)->mp_state
    };
    int ret;

    if (!cap_has_mp_state) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_put_mpstate(strerror(errno));
    }
    return ret;
}
518
/*
 * Read CPUCFG words 0..20 from KVM into env->cpucfg[].
 * Returns the status of the last CPUCFG read attempted.
 */
static int kvm_loongarch_get_cpucfg(CPUState *cs)
{
    int i, ret = 0;
    uint64_t val;
    CPULoongArchState *env = cpu_env(cs);

    for (i = 0; i < 21; i++) {
        ret = kvm_get_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
        if (ret < 0) {
            trace_kvm_failed_get_cpucfg(strerror(errno));
            /* val is uninitialized on failure; don't store garbage. */
            continue;
        }
        env->cpucfg[i] = (uint32_t)val;
    }
    return ret;
}
534
/*
 * Constrain env->cpucfg[2] to the feature bits the host KVM actually
 * supports, and pin the minimal FP/LLFTP revision fields.  Returns the
 * result of the KVM_HAS_DEVICE_ATTR probe (0 when supported).
 */
static int kvm_check_cpucfg2(CPUState *cs)
{
    int ret;
    uint64_t val;
    struct kvm_device_attr attr = {
        .group = KVM_LOONGARCH_VCPU_CPUCFG,
        .attr = 2,
        .addr = (uint64_t)&val,
    };
    CPULoongArchState *env = cpu_env(cs);

    ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);

    if (!ret) {
        /*
         * Only mask with val when the read succeeded: on failure val is
         * uninitialized and must not be used.
         */
        if (kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr) == 0) {
            env->cpucfg[2] &= val;

            if (FIELD_EX32(env->cpucfg[2], CPUCFG2, FP)) {
                /* The FP minimal version is 1. */
                env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, FP_VER, 1);
            }

            if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LLFTP)) {
                /* The LLFTP minimal version is 1. */
                env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2,
                                            LLFTP_VER, 1);
            }
        }
    }

    return ret;
}
565
/*
 * Write CPUCFG words 0..20 from env->cpucfg[] into KVM.  Word 2 is first
 * sanity-checked against host capabilities; a failed check aborts the
 * whole operation.
 */
static int kvm_loongarch_put_cpucfg(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    uint64_t val;
    int i, ret = 0;

    for (i = 0; i < 21; i++) {
        if (i == 2) {
            ret = kvm_check_cpucfg2(cs);
            if (ret) {
                return ret;
            }
        }
        val = env->cpucfg[i];
        ret = kvm_set_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
        if (ret < 0) {
            trace_kvm_failed_put_cpucfg(strerror(errno));
        }
    }
    return ret;
}
587
/*
 * Sync all architectural state from KVM into QEMU: GPRs, CPUCFG,
 * CSRs, FP state, then MP state.  Stops at the first failure.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    int ret;

    if ((ret = kvm_loongarch_get_regs_core(cs)) ||
        (ret = kvm_loongarch_get_cpucfg(cs)) ||
        (ret = kvm_loongarch_get_csr(cs)) ||
        (ret = kvm_loongarch_get_regs_fp(cs))) {
        return ret;
    }

    return kvm_loongarch_get_mpstate(cs);
}
615
/*
 * Push all architectural state from QEMU into KVM: GPRs, CPUCFG,
 * CSRs (honoring @level), FP state, then MP state.  Stops at the
 * first failure.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    int ret;

    if ((ret = kvm_loongarch_put_regs_core(cs)) ||
        (ret = kvm_loongarch_put_cpucfg(cs)) ||
        (ret = kvm_loongarch_put_csr(cs, level)) ||
        (ret = kvm_loongarch_put_regs_fp(cs))) {
        return ret;
    }

    return kvm_loongarch_put_mpstate(cs);
}
643
/*
 * VM run-state transition hook: save the stable counter when the VM
 * stops and restore it when it resumes, so guest time does not jump
 * across pauses/migration.
 */
static void kvm_loongarch_vm_stage_change(void *opaque, bool running,
                                          RunState state)
{
    CPUState *cs = opaque;
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    int err;

    if (!running) {
        /* Stopping: snapshot the counter into QEMU state. */
        err = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
                              &cpu->kvm_state_counter);
        if (err < 0) {
            trace_kvm_failed_get_counter(strerror(errno));
        }
        return;
    }

    /* Resuming: restore the snapshot into KVM. */
    err = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
                          &cpu->kvm_state_counter);
    if (err < 0) {
        trace_kvm_failed_put_counter(strerror(errno));
    }
}
665
/*
 * Per-vCPU setup: register the run-state hook that saves/restores the
 * stable counter, and learn the host's software-breakpoint encoding.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    uint64_t insn;

    qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs);

    /* Best effort: older kernels may not expose the debug instruction. */
    if (!kvm_get_one_reg(cs, KVM_REG_LOONGARCH_DEBUG_INST, &insn)) {
        brk_insn = insn;
    }

    return 0;
}
678
/* Nothing architecture-specific to tear down for a vCPU. */
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}
683
/* The KVM vCPU id is simply QEMU's CPU index. */
unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}
688
/* No per-arch bookkeeping is tied to a virq; releasing is a no-op. */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
693
/* MSI-data-to-GSI translation is not used on LoongArch; must not be called. */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
698
/* No MSI route fixups are needed on LoongArch. */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
704
/* No post-processing after adding an MSI route on LoongArch. */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
710
/* No architecture-specific IRQ routing setup. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
714
/* LoongArch has a single KVM VM type. */
int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}
719
/* One-time KVM setup: probe whether the host supports MP-state ioctls. */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
    return 0;
}
725
/* No in-kernel irqchip to create; report success. */
int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}
730
/* Nothing to do before entering the guest. */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
734
kvm_arch_post_run(CPUState * cs,struct kvm_run * run)735 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
736 {
737 return MEMTXATTRS_UNSPECIFIED;
738 }
739
/* No async events to process; report whether the vCPU is halted. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
744
/* Always stop the VM on an emulation error. */
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}
749
/* Enable in-guest software breakpoints when any are armed. */
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    if (!kvm_sw_breakpoints_active(cpu)) {
        return;
    }
    dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
}
756
/*
 * Arm a software breakpoint: save the original 4-byte instruction at
 * bp->pc, then patch in the host-provided break instruction.
 */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    /* Read and stash the instruction being replaced. */
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0)) {
        error_report("%s failed", __func__);
        return -EINVAL;
    }
    /* Overwrite it with the breakpoint encoding. */
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        error_report("%s failed", __func__);
        return -EINVAL;
    }
    return 0;
}
766
/*
 * Disarm a software breakpoint: verify the break instruction is still
 * present at bp->pc, then write the saved original instruction back.
 */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint32_t cur_insn = 0;

    /* Short-circuit: cur_insn is only compared after a successful read. */
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&cur_insn, 4, 0) ||
        cur_insn != brk_insn ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        error_report("%s failed", __func__);
        return -EINVAL;
    }
    return 0;
}
779
/* Hardware breakpoints are not implemented for LoongArch KVM. */
int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}
784
/* Hardware breakpoints are not implemented for LoongArch KVM. */
int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}
789
/* No hardware breakpoints exist to remove. */
void kvm_arch_remove_all_hw_breakpoints(void)
{
}
793
/*
 * Decide whether a KVM_EXIT_DEBUG belongs to us: either single-stepping
 * is on, or the PC sits on one of our software breakpoints.
 */
static bool kvm_loongarch_handle_debug(CPUState *cs, struct kvm_run *run)
{
    CPULoongArchState *env = &LOONGARCH_CPU(cs)->env;

    /* env->pc must be current before we can look it up. */
    kvm_cpu_synchronize_state(cs);

    return cs->singlestep_enabled ||
           kvm_find_sw_breakpoint(cs, env->pc) != NULL;
}
810
/*
 * Dispatch a KVM exit: forward IOCSR accesses to the IOCSR address
 * space, route debug exits to the gdbstub, and warn on anything else.
 * Returns EXCP_DEBUG for a debug stop, 0 for handled exits, -1 otherwise.
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    CPULoongArchState *env = cpu_env(cs);
    MemTxAttrs attrs = {
        /* Tag the transaction with the issuing vCPU. */
        .requester_id = cs->cpu_index,
    };
    int ret = 0;

    trace_kvm_arch_handle_exit(run->exit_reason);

    switch (run->exit_reason) {
    case KVM_EXIT_LOONGARCH_IOCSR:
        address_space_rw(env->address_space_iocsr,
                         run->iocsr_io.phys_addr,
                         attrs,
                         run->iocsr_io.data,
                         run->iocsr_io.len,
                         run->iocsr_io.is_write);
        break;

    case KVM_EXIT_DEBUG:
        if (kvm_loongarch_handle_debug(cs, run)) {
            ret = EXCP_DEBUG;
        }
        break;

    default:
        warn_report("KVM: unknown exit reason %d", run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}
843
/*
 * Assert (level != 0) or deassert (level == 0) interrupt line @irq on
 * the vCPU; KVM encodes deassertion as a negative irq number.
 */
int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level)
{
    struct kvm_interrupt intr = {
        .irq = level ? irq : -irq,
    };

    trace_kvm_set_intr(irq, level);
    return kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &intr);
}
858
/* No extra properties on the KVM accelerator class for LoongArch. */
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
862